diff --git a/assets/bitnami/kafka-25.3.0.tgz b/assets/bitnami/kafka-25.3.0.tgz new file mode 100644 index 000000000..0b675f771 Binary files /dev/null and b/assets/bitnami/kafka-25.3.0.tgz differ diff --git a/assets/bitnami/redis-18.1.2.tgz b/assets/bitnami/redis-18.1.2.tgz new file mode 100644 index 000000000..6fecaf8a8 Binary files /dev/null and b/assets/bitnami/redis-18.1.2.tgz differ diff --git a/assets/cockroach-labs/cockroachdb-11.2.1.tgz b/assets/cockroach-labs/cockroachdb-11.2.1.tgz new file mode 100644 index 000000000..0013f2719 Binary files /dev/null and b/assets/cockroach-labs/cockroachdb-11.2.1.tgz differ diff --git a/assets/confluent/confluent-for-kubernetes-0.824.14.tgz b/assets/confluent/confluent-for-kubernetes-0.824.14.tgz new file mode 100644 index 000000000..cd637bdda Binary files /dev/null and b/assets/confluent/confluent-for-kubernetes-0.824.14.tgz differ diff --git a/assets/datadog/datadog-3.38.4.tgz b/assets/datadog/datadog-3.38.4.tgz new file mode 100644 index 000000000..5147424c9 Binary files /dev/null and b/assets/datadog/datadog-3.38.4.tgz differ diff --git a/assets/datadog/datadog-operator-1.1.2.tgz b/assets/datadog/datadog-operator-1.1.2.tgz new file mode 100644 index 000000000..8f2051479 Binary files /dev/null and b/assets/datadog/datadog-operator-1.1.2.tgz differ diff --git a/assets/digitalis/vals-operator-0.7.7.tgz b/assets/digitalis/vals-operator-0.7.7.tgz new file mode 100644 index 000000000..3452834fa Binary files /dev/null and b/assets/digitalis/vals-operator-0.7.7.tgz differ diff --git a/assets/jfrog/artifactory-ha-107.68.13.tgz b/assets/jfrog/artifactory-ha-107.68.13.tgz new file mode 100644 index 000000000..6451123fe Binary files /dev/null and b/assets/jfrog/artifactory-ha-107.68.13.tgz differ diff --git a/assets/jfrog/artifactory-jcr-107.68.13.tgz b/assets/jfrog/artifactory-jcr-107.68.13.tgz new file mode 100644 index 000000000..07bd4a60c Binary files /dev/null and b/assets/jfrog/artifactory-jcr-107.68.13.tgz differ diff --git 
a/assets/kong/kong-2.28.1.tgz b/assets/kong/kong-2.28.1.tgz new file mode 100644 index 000000000..6ea7e109f Binary files /dev/null and b/assets/kong/kong-2.28.1.tgz differ diff --git a/assets/ngrok/kubernetes-ingress-controller-0.11.0.tgz b/assets/ngrok/kubernetes-ingress-controller-0.11.0.tgz new file mode 100644 index 000000000..8ff85135b Binary files /dev/null and b/assets/ngrok/kubernetes-ingress-controller-0.11.0.tgz differ diff --git a/assets/redpanda/redpanda-5.6.0.tgz b/assets/redpanda/redpanda-5.6.0.tgz new file mode 100644 index 000000000..7083f944c Binary files /dev/null and b/assets/redpanda/redpanda-5.6.0.tgz differ diff --git a/assets/stackstate/stackstate-k8s-agent-1.0.49.tgz b/assets/stackstate/stackstate-k8s-agent-1.0.49.tgz new file mode 100644 index 000000000..266af9608 Binary files /dev/null and b/assets/stackstate/stackstate-k8s-agent-1.0.49.tgz differ diff --git a/charts/bitnami/kafka/Chart.yaml b/charts/bitnami/kafka/Chart.yaml index 2faf70d71..a42a9357a 100644 --- a/charts/bitnami/kafka/Chart.yaml +++ b/charts/bitnami/kafka/Chart.yaml @@ -45,4 +45,4 @@ maintainers: name: kafka sources: - https://github.com/bitnami/charts/tree/main/bitnami/kafka -version: 25.2.0 +version: 25.3.0 diff --git a/charts/bitnami/kafka/README.md b/charts/bitnami/kafka/README.md index de69675e5..ca8f92636 100644 --- a/charts/bitnami/kafka/README.md +++ b/charts/bitnami/kafka/README.md @@ -120,20 +120,28 @@ The command removes all the Kubernetes components associated with the chart and ### Kafka SASL parameters -| Name | Description | Value | -| --------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- | -| `sasl.enabledMechanisms` | Comma-separated list of allowed SASL mechanisms when SASL listeners are configured. 
Allowed types: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512` | `PLAIN,SCRAM-SHA-256,SCRAM-SHA-512` | -| `sasl.interBrokerMechanism` | SASL mechanism for inter broker communication. | `PLAIN` | -| `sasl.controllerMechanism` | SASL mechanism for controller communications. | `PLAIN` | -| `sasl.interbroker.user` | Username for inter-broker communications when SASL is enabled | `inter_broker_user` | -| `sasl.interbroker.password` | Password for inter-broker communications when SASL is enabled. If not set and SASL is enabled for the controller listener, a random password will be generated. | `""` | -| `sasl.controller.user` | Username for controller communications when SASL is enabled | `controller_user` | -| `sasl.controller.password` | Password for controller communications when SASL is enabled. If not set and SASL is enabled for the inter-broker listener, a random password will be generated. | `""` | -| `sasl.client.users` | Comma-separated list of usernames for client communications when SASL is enabled | `["user1"]` | -| `sasl.client.passwords` | Comma-separated list of passwords for client communications when SASL is enabled, must match the number of client.users | `""` | -| `sasl.zookeeper.user` | Username for zookeeper communications when SASL is enabled. | `""` | -| `sasl.zookeeper.password` | Password for zookeeper communications when SASL is enabled. | `""` | -| `sasl.existingSecret` | Name of the existing secret containing credentials for clientUsers, interBrokerUser, controllerUser and zookeeperUser | `""` | +| Name | Description | Value | +| ----------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------- | +| `sasl.enabledMechanisms` | Comma-separated list of allowed SASL mechanisms when SASL listeners are configured. 
Allowed types: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`, `OAUTHBEARER` | `PLAIN,SCRAM-SHA-256,SCRAM-SHA-512` | +| `sasl.interBrokerMechanism` | SASL mechanism for inter broker communication. | `PLAIN` | +| `sasl.controllerMechanism` | SASL mechanism for controller communications. | `PLAIN` | +| `sasl.oauthbearer.tokenEndpointUrl` | The URL for the OAuth/OIDC identity provider | `""` | +| `sasl.oauthbearer.jwksEndpointUrl` | The OAuth/OIDC provider URL from which the provider's JWKS (JSON Web Key Set) can be retrieved | `""` | +| `sasl.oauthbearer.expectedAudience` | The comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences | `""` | +| `sasl.oauthbearer.subClaimName` | The OAuth claim name for the subject. | `sub` | +| `sasl.interbroker.user` | Username for inter-broker communications when SASL is enabled | `inter_broker_user` | +| `sasl.interbroker.password` | Password for inter-broker communications when SASL is enabled. If not set and SASL is enabled for the controller listener, a random password will be generated. | `""` | +| `sasl.interbroker.clientId` | Client ID for inter-broker communications when SASL is enabled with mechanism OAUTHBEARER | `inter_broker_client` | +| `sasl.interbroker.clientSecret` | Client Secret for inter-broker communications when SASL is enabled with mechanism OAUTHBEARER. If not set and SASL is enabled for the controller listener, a random secret will be generated. | `""` | +| `sasl.controller.user` | Username for controller communications when SASL is enabled | `controller_user` | +| `sasl.controller.password` | Password for controller communications when SASL is enabled. If not set and SASL is enabled for the inter-broker listener, a random password will be generated. 
| `""` | +| `sasl.controller.clientId` | Client ID for controller communications when SASL is enabled with mechanism OAUTHBEARER | `controller_broker_client` | +| `sasl.controller.clientSecret` | Client Secret for controller communications when SASL is enabled with mechanism OAUTHBEARER. If not set and SASL is enabled for the inter-broker listener, a random secret will be generated. | `""` | +| `sasl.client.users` | Comma-separated list of usernames for client communications when SASL is enabled | `["user1"]` | +| `sasl.client.passwords` | Comma-separated list of passwords for client communications when SASL is enabled, must match the number of client.users | `""` | +| `sasl.zookeeper.user` | Username for zookeeper communications when SASL is enabled. | `""` | +| `sasl.zookeeper.password` | Password for zookeeper communications when SASL is enabled. | `""` | +| `sasl.existingSecret` | Name of the existing secret containing credentials for clientUsers, interBrokerUser, controllerUser and zookeeperUser | `""` | ### Kafka TLS parameters diff --git a/charts/bitnami/kafka/templates/NOTES.txt b/charts/bitnami/kafka/templates/NOTES.txt index baeb77944..69d285913 100644 --- a/charts/bitnami/kafka/templates/NOTES.txt +++ b/charts/bitnami/kafka/templates/NOTES.txt @@ -77,6 +77,13 @@ To connect a client to your Kafka, you need to create the 'client.properties' co security.protocol={{ .Values.listeners.client.protocol }} {{- if $clientSaslEnabled }} +{{- if regexFind "OAUTHBEARER" (upper .Values.sasl.enabledMechanisms ) }} +sasl.jaas.config="org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required"\ + clientId="" \ + password=""; +sasl.login.callback.handler.class=org.apache.kafka.common.security.oauthbearer.secured.OAuthBearerLoginCallbackHandler +sasl.oauthbearer.token.endpoint.url={{ .Values.sasl.oauthbearer.tokenEndpointUrl }} +{{- else }} {{- if regexFind "SCRAM-SHA-256" (upper .Values.sasl.enabledMechanisms) }} sasl.mechanism=SCRAM-SHA-256 {{- else if 
regexFind "SCRAM-SHA-512" (upper .Values.sasl.enabledMechanisms) }} @@ -89,6 +96,7 @@ sasl.jaas.config={{ $securityModule }} \ username="{{ index .Values.sasl.client.users 0 }}" \ password="$(kubectl get secret {{ $fullname }}-user-passwords --namespace {{ $releaseNamespace }} -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)"; {{- end }} +{{- end }} {{- if $clientSslEnabled }} {{- $clientTlsType := upper .Values.tls.type }} ssl.truststore.type={{ $clientTlsType }} @@ -265,6 +273,13 @@ To connect a client to your Kafka, you need to create the 'client.properties' co security.protocol={{ .Values.listeners.external.protocol }} {{- if $externalSaslEnabled }} +{{- if regexFind "OAUTHBEARER" (upper .Values.sasl.enabledMechanisms ) }} +sasl.jaas.config="org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required"\ + clientId="" \ + password=""; +sasl.login.callback.handler.class=org.apache.kafka.common.security.oauthbearer.secured.OAuthBearerLoginCallbackHandler +sasl.oauthbearer.token.endpoint.url={{ .Values.sasl.oauthbearer.tokenEndpointUrl }} +{{- else }} {{- if regexFind "SCRAM-SHA-256" (upper .Values.sasl.enabledMechanisms) }} sasl.mechanism=SCRAM-SHA-256 {{- else if regexFind "SCRAM-SHA-512" (upper .Values.sasl.enabledMechanisms) }} @@ -277,6 +292,7 @@ sasl.jaas.config={{ $securityModule }} \ username="{{ index .Values.sasl.client.users 0 }}" \ password="$(kubectl get secret {{ $fullname }}-user-passwords --namespace {{ $releaseNamespace }} -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)"; {{- end }} +{{- end }} {{- if $externalSslEnabled }} {{- $clientTlsType := upper .Values.tls.type }} ssl.truststore.type={{ $clientTlsType }} diff --git a/charts/bitnami/kafka/templates/_helpers.tpl b/charts/bitnami/kafka/templates/_helpers.tpl index a5a83c675..86af975a5 100644 --- a/charts/bitnami/kafka/templates/_helpers.tpl +++ b/charts/bitnami/kafka/templates/_helpers.tpl @@ -167,6 +167,41 @@ Return true if SASL 
connections should be configured {{- end -}} {{- end -}} +{{/* +Returns true if a sasl mechanism that uses usernames and passwords is in use +*/}} +{{- define "kafka.saslUserPasswordsEnabled" -}} +{{- if (include "kafka.saslEnabled" .) -}} +{{- if or (regexFind "PLAIN" (upper .Values.sasl.enabledMechanisms)) (regexFind "SCRAM" (upper .Values.sasl.enabledMechanisms)) -}} +true +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if a sasl mechanism that uses client IDs and client secrets is in use +*/}} +{{- define "kafka.saslClientSecretsEnabled" -}} +{{- if (include "kafka.saslEnabled" .) -}} +{{- if (regexFind "OAUTHBEARER" (upper .Values.sasl.enabledMechanisms)) -}} +true +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Returns the security module based on the provided sasl mechanism +*/}} +{{- define "kafka.saslSecurityModule" -}} +{{- if eq "PLAIN" .mechanism -}} +org.apache.kafka.common.security.plain.PlainLoginModule required +{{- else if regexFind "SCRAM" .mechanism -}} +org.apache.kafka.common.security.scram.ScramLoginModule required +{{- else if eq "OAUTHBEARER" .mechanism -}} +org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required +{{- end -}} +{{- end -}} + {{/* Return the Kafka SASL credentials secret */}} @@ -418,7 +453,9 @@ The exporter uses a different nomenclature so we need to do this hack */}} {{- define "kafka.metrics.kafka.saslMechanism" -}} {{- $saslMechanisms := .Values.sasl.enabledMechanisms }} -{{- if contains "SCRAM-SHA-512" (upper $saslMechanisms) }} +{{- if contains "OAUTHBEARER" (upper $saslMechanisms) }} + {{- print "oauthbearer" -}} +{{- else if contains "SCRAM-SHA-512" (upper $saslMechanisms) }} {{- print "scram-sha512" -}} {{- else if contains "SCRAM-SHA-256" (upper $saslMechanisms) }} {{- print "scram-sha256" -}} @@ -597,12 +634,18 @@ listener.name.{{lower $listener.name}}.ssl.client.auth={{ $listener.sslClientAut {{- end }} {{- if regexFind "SASL" (upper $listener.protocol) }} {{- range $mechanism := ( 
splitList "," $.Values.sasl.enabledMechanisms )}} - {{- $securityModule := ternary "org.apache.kafka.common.security.plain.PlainLoginModule required" "org.apache.kafka.common.security.scram.ScramLoginModule required" (eq "PLAIN" (upper $mechanism)) }} + {{- $securityModule := include "kafka.saslSecurityModule" (dict "mechanism" (upper $mechanism)) }} {{- $saslJaasConfig := list $securityModule }} {{- if eq $listener.name $.Values.listeners.interbroker.name }} + {{- if (eq (upper $mechanism) "OAUTHBEARER") }} + {{- $saslJaasConfig = append $saslJaasConfig (printf "clientId=\"%s\"" $.Values.sasl.interbroker.clientId) }} + {{- $saslJaasConfig = append $saslJaasConfig (print "clientSecret=\"interbroker-client-secret-placeholder\"") }} +listener.name.{{lower $listener.name}}.oauthbearer.sasl.login.callback.handler.class=org.apache.kafka.common.security.oauthbearer.secured.OAuthBearerLoginCallbackHandler + {{- else }} {{- $saslJaasConfig = append $saslJaasConfig (printf "username=\"%s\"" $.Values.sasl.interbroker.user) }} {{- $saslJaasConfig = append $saslJaasConfig (print "password=\"interbroker-password-placeholder\"") }} {{- end }} + {{- end }} {{- if eq (upper $mechanism) "PLAIN" }} {{- if eq $listener.name $.Values.listeners.interbroker.name }} {{- $saslJaasConfig = append $saslJaasConfig (printf "user_%s=\"interbroker-password-placeholder\"" $.Values.sasl.interbroker.user) }} @@ -612,9 +655,18 @@ listener.name.{{lower $listener.name}}.ssl.client.auth={{ $listener.sslClientAut {{- end }} {{- end }} listener.name.{{lower $listener.name}}.{{lower $mechanism}}.sasl.jaas.config={{ join " " $saslJaasConfig }}; + {{- if eq (upper $mechanism) "OAUTHBEARER" }} +listener.name.{{lower $listener.name}}.oauthbearer.sasl.server.callback.handler.class=org.apache.kafka.common.security.oauthbearer.secured.OAuthBearerValidatorCallbackHandler + {{- end }} {{- end }} {{- end }} {{- end }} +{{- if regexFind "OAUTHBEARER" $.Values.sasl.enabledMechanisms }} 
+sasl.oauthbearer.token.endpoint.url={{ $.Values.sasl.oauthbearer.tokenEndpointUrl }} +sasl.oauthbearer.jwks.endpoint.url={{ $.Values.sasl.oauthbearer.jwksEndpointUrl }} +sasl.oauthbearer.expected.audience={{ $.Values.sasl.oauthbearer.expectedAudience }} +sasl.oauthbearer.sub.claim.name={{ $.Values.sasl.oauthbearer.subClaimName }} +{{- end }} # End of SASL JAAS configuration {{- end }} {{- end -}} @@ -655,10 +707,15 @@ listener.name.{{lower $listener.name}}.ssl.client.auth={{ $listener.sslClientAut {{- end }} {{- if regexFind "SASL" (upper $listener.protocol) }} {{- $mechanism := $.Values.sasl.controllerMechanism }} - {{- $securityModule := ternary "org.apache.kafka.common.security.plain.PlainLoginModule required" "org.apache.kafka.common.security.scram.ScramLoginModule required" (eq "PLAIN" (upper $mechanism)) }} + {{- $securityModule := include "kafka.saslSecurityModule" (dict "mechanism" (upper $mechanism)) }} {{- $saslJaasConfig := list $securityModule }} + {{- if (eq (upper $mechanism) "OAUTHBEARER") }} + {{- $saslJaasConfig = append $saslJaasConfig (printf "clientId=\"%s\"" $.Values.sasl.controller.clientId) }} + {{- $saslJaasConfig = append $saslJaasConfig (print "clientSecret=\"controller-client-secret-placeholder\"") }} + {{- else }} {{- $saslJaasConfig = append $saslJaasConfig (printf "username=\"%s\"" $.Values.sasl.controller.user) }} {{- $saslJaasConfig = append $saslJaasConfig (print "password=\"controller-password-placeholder\"") }} + {{- end }} {{- if eq (upper $mechanism) "PLAIN" }} {{- $saslJaasConfig = append $saslJaasConfig (printf "user_%s=\"controller-password-placeholder\"" $.Values.sasl.controller.user) }} {{- end }} @@ -666,6 +723,10 @@ listener.name.{{lower $listener.name}}.ssl.client.auth={{ $listener.sslClientAut sasl.mechanism.controller.protocol={{ upper $mechanism }} listener.name.{{lower $listener.name}}.sasl.enabled.mechanisms={{ upper $mechanism }} listener.name.{{lower $listener.name}}.{{lower $mechanism }}.sasl.jaas.config={{ join 
" " $saslJaasConfig }}; +{{- if regexFind "OAUTHBEARER" (upper $mechanism) }} +listener.name.{{lower $listener.name}}.oauthbearer.sasl.server.callback.handler.class=org.apache.kafka.common.security.oauthbearer.secured.OAuthBearerValidatorCallbackHandler +listener.name.{{lower $listener.name}}.oauthbearer.sasl.login.callback.handler.class=org.apache.kafka.common.security.oauthbearer.secured.OAuthBearerLoginCallbackHandler +{{- end }} {{- end }} {{- end -}} @@ -753,6 +814,7 @@ Init container definition for Kafka initialization {{- end }} {{- end }} {{- if and (include "kafka.client.saslEnabled" .context ) .context.Values.sasl.client.users }} + {{- if (include "kafka.saslUserPasswordsEnabled" .context) }} - name: KAFKA_CLIENT_USERS value: {{ join "," .context.Values.sasl.client.users | quote }} - name: KAFKA_CLIENT_PASSWORDS @@ -761,7 +823,9 @@ Init container definition for Kafka initialization name: {{ include "kafka.saslSecretName" .context }} key: client-passwords {{- end }} + {{- end }} {{- if regexFind "SASL" (upper .context.Values.listeners.interbroker.protocol) }} + {{- if (include "kafka.saslUserPasswordsEnabled" .context) }} - name: KAFKA_INTER_BROKER_USER value: {{ .context.Values.sasl.interbroker.user | quote }} - name: KAFKA_INTER_BROKER_PASSWORD @@ -770,13 +834,36 @@ Init container definition for Kafka initialization name: {{ include "kafka.saslSecretName" .context }} key: inter-broker-password {{- end }} + {{- if (include "kafka.saslClientSecretsEnabled" .context) }} + - name: KAFKA_INTER_BROKER_CLIENT_ID + value: {{ .context.Values.sasl.interbroker.clientId | quote }} + - name: KAFKA_INTER_BROKER_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" .context }} + key: inter-broker-client-secret + {{- end }} + {{- end }} {{- if and .context.Values.kraft.enabled (regexFind "SASL" (upper .context.Values.listeners.controller.protocol)) }} + {{- if (include "kafka.saslUserPasswordsEnabled" .context) }} + - name: 
KAFKA_CONTROLLER_USER + value: {{ .context.Values.sasl.controller.user | quote }} - name: KAFKA_CONTROLLER_PASSWORD valueFrom: secretKeyRef: name: {{ include "kafka.saslSecretName" .context }} key: controller-password {{- end }} + {{- if (include "kafka.saslClientSecretsEnabled" .context) }} + - name: KAFKA_CONTROLLER_CLIENT_ID + value: {{ .context.Values.sasl.controller.clientId | quote }} + - name: KAFKA_CONTROLLER_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" .context }} + key: controller-client-secret + {{- end }} + {{- end }} {{- if (include "kafka.sslEnabled" .context ) }} - name: KAFKA_TLS_TYPE value: {{ ternary "PEM" "JKS" (or .context.Values.tls.autoGenerated (eq (upper .context.Values.tls.type) "PEM")) }} diff --git a/charts/bitnami/kafka/templates/broker/statefulset.yaml b/charts/bitnami/kafka/templates/broker/statefulset.yaml index 4a5974ab3..621b674fa 100644 --- a/charts/bitnami/kafka/templates/broker/statefulset.yaml +++ b/charts/bitnami/kafka/templates/broker/statefulset.yaml @@ -167,7 +167,7 @@ spec: - name: KAFKA_KRAFT_BOOTSTRAP_SCRAM_USERS value: "true" {{- end }} - {{- if and (include "kafka.client.saslEnabled" . ) .Values.sasl.client.users }} + {{- if and (include "kafka.client.saslEnabled" . ) .Values.sasl.client.users (include "kafka.saslUserPasswordsEnabled" .) }} - name: KAFKA_CLIENT_USERS value: {{ join "," .Values.sasl.client.users | quote }} - name: KAFKA_CLIENT_PASSWORDS @@ -177,6 +177,7 @@ spec: key: client-passwords {{- end }} {{- if regexFind "SASL" (upper .Values.listeners.interbroker.protocol) }} + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} - name: KAFKA_INTER_BROKER_USER value: {{ .Values.sasl.interbroker.user | quote }} - name: KAFKA_INTER_BROKER_PASSWORD @@ -185,7 +186,18 @@ spec: name: {{ include "kafka.saslSecretName" . }} key: inter-broker-password {{- end }} + {{- if (include "kafka.saslClientSecretsEnabled" .) 
}} + - name: KAFKA_INTER_BROKER_CLIENT_ID + value: {{ .Values.sasl.interbroker.clientId | quote }} + - name: KAFKA_INTER_BROKER_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . }} + key: inter-broker-client-secret + {{- end }} + {{- end }} {{- if and .Values.kraft.enabled (regexFind "SASL" (upper .Values.listeners.controller.protocol)) }} + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} - name: KAFKA_CONTROLLER_USER value: {{ .Values.sasl.controller.user | quote }} - name: KAFKA_CONTROLLER_PASSWORD @@ -194,6 +206,16 @@ spec: name: {{ include "kafka.saslSecretName" . }} key: controller-password {{- end }} + {{- if (include "kafka.saslClientSecretsEnabled" .) }} + - name: KAFKA_CONTROLLER_CLIENT_ID + value: {{ .Values.sasl.controller.clientId | quote }} + - name: KAFKA_CONTROLLER_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . }} + key: controller-client-secret + {{- end }} + {{- end }} {{- end }} {{- if .Values.metrics.jmx.enabled }} - name: JMX_PORT diff --git a/charts/bitnami/kafka/templates/controller-eligible/statefulset.yaml b/charts/bitnami/kafka/templates/controller-eligible/statefulset.yaml index 6a4a8fe00..54bbbf58d 100644 --- a/charts/bitnami/kafka/templates/controller-eligible/statefulset.yaml +++ b/charts/bitnami/kafka/templates/controller-eligible/statefulset.yaml @@ -156,7 +156,7 @@ spec: {{- if and (include "kafka.saslEnabled" .) (or (regexFind "SCRAM" (upper .Values.sasl.enabledMechanisms)) (regexFind "SCRAM" (upper .Values.sasl.controllerMechanism)) (regexFind "SCRAM" (upper .Values.sasl.interBrokerMechanism))) }} - name: KAFKA_KRAFT_BOOTSTRAP_SCRAM_USERS value: "true" - {{- if and (include "kafka.client.saslEnabled" . ) .Values.sasl.client.users }} + {{- if and (include "kafka.client.saslEnabled" . ) .Values.sasl.client.users (include "kafka.saslUserPasswordsEnabled" .) 
}} - name: KAFKA_CLIENT_USERS value: {{ join "," .Values.sasl.client.users | quote }} - name: KAFKA_CLIENT_PASSWORDS @@ -166,6 +166,7 @@ spec: key: client-passwords {{- end }} {{- if regexFind "SASL" (upper .Values.listeners.interbroker.protocol) }} + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} - name: KAFKA_INTER_BROKER_USER value: {{ .Values.sasl.interbroker.user | quote }} - name: KAFKA_INTER_BROKER_PASSWORD @@ -174,7 +175,18 @@ spec: name: {{ include "kafka.saslSecretName" . }} key: inter-broker-password {{- end }} + {{- if (include "kafka.saslClientSecretsEnabled" .) }} + - name: KAFKA_INTER_BROKER_CLIENT_ID + value: {{ .Values.sasl.interbroker.clientId | quote }} + - name: KAFKA_INTER_BROKER_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . }} + key: inter-broker-client-secret + {{- end }} + {{- end }} {{- if regexFind "SASL" (upper .Values.listeners.controller.protocol) }} + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} - name: KAFKA_CONTROLLER_USER value: {{ .Values.sasl.controller.user | quote }} - name: KAFKA_CONTROLLER_PASSWORD @@ -183,6 +195,16 @@ spec: name: {{ include "kafka.saslSecretName" . }} key: controller-password {{- end }} + {{- if (include "kafka.saslClientSecretsEnabled" .) }} + - name: KAFKA_CONTROLLER_CLIENT_ID + value: {{ .Values.sasl.controller.clientId | quote }} + - name: KAFKA_CONTROLLER_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . 
}} + key: controller-client-secret + {{- end }} + {{- end }} {{- end }} {{- if .Values.metrics.jmx.enabled }} - name: JMX_PORT diff --git a/charts/bitnami/kafka/templates/provisioning/job.yaml b/charts/bitnami/kafka/templates/provisioning/job.yaml index adddad9fe..82c83eb92 100644 --- a/charts/bitnami/kafka/templates/provisioning/job.yaml +++ b/charts/bitnami/kafka/templates/provisioning/job.yaml @@ -135,6 +135,11 @@ spec: {{- else if regexFind "SCRAM-SHA-512" ( upper .Values.sasl.enabledMechanisms) }} kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism SCRAM-SHA-512 kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";" + {{- else if regexFind "OAUTHBEARER" ( upper .Values.sasl.enabledMechanisms) }} + kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism OAUTHBEARER + kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required clientId=\"$SASL_CLIENT_ID\" password=\"$SASL_CLIENT_SECRET\";" + kafka_common_conf_set "$CLIENT_CONF" sasl.login.callback.handler.class "org.apache.kafka.common.security.oauthbearer.secured.OAuthBearerLoginCallbackHandler" + kafka_common_conf_set "$CLIENT_CONF" sasl.oauthbearer.token.endpoint.url {{ .Values.sasl.oauthbearer.tokenEndpointUrl | quote }} {{- end }} {{- end }} fi @@ -199,6 +204,7 @@ spec: - name: KAFKA_SERVICE value: {{ printf "%s:%d" (include "common.names.fullname" .) (.Values.service.ports.client | int64) }} {{- if regexFind "SASL" (upper .Values.listeners.client.protocol) }} + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} - name: SASL_USERNAME value: {{ index .Values.sasl.client.users 0 | quote }} - name: SASL_USER_PASSWORD @@ -207,6 +213,16 @@ spec: name: {{ include "kafka.saslSecretName" . }} key: system-user-password {{- end }} + {{- if (include "kafka.saslClientSecretsEnabled" .) 
}} + - name: SASL_CLIENT_ID + value: {{ .Values.sasl.interbroker.clientId | quote }} + - name: SASL_USER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.saslSecretName" . }} + key: inter-broker-client-secret + {{- end }} + {{- end }} {{- if .Values.provisioning.extraEnvVars }} {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.extraEnvVars "context" $) | nindent 12 }} {{- end }} diff --git a/charts/bitnami/kafka/templates/scripts-configmap.yaml b/charts/bitnami/kafka/templates/scripts-configmap.yaml index 316bb1ef9..472cb47e2 100644 --- a/charts/bitnami/kafka/templates/scripts-configmap.yaml +++ b/charts/bitnami/kafka/templates/scripts-configmap.yaml @@ -292,11 +292,21 @@ data: # Replace placeholders with passwords {{- if regexFind "SASL" (upper .Values.listeners.interbroker.protocol) }} + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} replace_placeholder "interbroker-password-placeholder" "$KAFKA_INTER_BROKER_PASSWORD" + {{- end }} + {{- if (include "kafka.saslClientSecretsEnabled" .) }} + replace_placeholder "interbroker-client-secret-placeholder" "$KAFKA_INTER_BROKER_CLIENT_SECRET" + {{- end }} {{- end -}} {{- if and .Values.kraft.enabled (regexFind "SASL" (upper .Values.listeners.controller.protocol)) }} + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} replace_placeholder "controller-password-placeholder" "$KAFKA_CONTROLLER_PASSWORD" {{- end }} + {{- if (include "kafka.saslClientSecretsEnabled" .) 
}} + replace_placeholder "controller-client-secret-placeholder" "$KAFKA_CONTROLLER_CLIENT_SECRET" + {{- end }} + {{- end }} {{- if (include "kafka.client.saslEnabled" .)}} read -r -a passwords <<<"$(tr ',;' ' ' <<<"${KAFKA_CLIENT_PASSWORDS:-}")" for ((i = 0; i < ${#passwords[@]}; i++)); do diff --git a/charts/bitnami/kafka/templates/secrets.yaml b/charts/bitnami/kafka/templates/secrets.yaml index abd2e9a63..7243ee417 100644 --- a/charts/bitnami/kafka/templates/secrets.yaml +++ b/charts/bitnami/kafka/templates/secrets.yaml @@ -32,18 +32,30 @@ data: {{- end }} {{- $secretValue = join "," $clientPasswords | toString | b64enc }} {{- end }} + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} client-passwords: {{ $secretValue | quote }} system-user-password: {{ index (splitList "," (b64dec $secretValue)) 0 | b64enc | quote }} {{- end }} + {{- end }} {{- if or .Values.sasl.zookeeper.user .Values.zookeeper.auth.client.enabled }} zookeeper-password: {{ include "common.secrets.passwords.manage" (dict "secret" $secretName "key" "zookeeper-password" "providedValues" (list "sasl.zookeeper.password" "zookeeper.auth.client.clientPassword") "failOnNew" false "context" $) }} {{- end }} {{- if regexFind "SASL" (upper .Values.listeners.interbroker.protocol) }} + {{- if (include "kafka.saslUserPasswordsEnabled" .) }} inter-broker-password: {{ include "common.secrets.passwords.manage" (dict "secret" $secretName "key" "inter-broker-password" "providedValues" (list "sasl.interbroker.password") "failOnNew" false "context" $) }} {{- end }} + {{- if (include "kafka.saslClientSecretsEnabled" .) }} + inter-broker-client-secret: {{ include "common.secrets.passwords.manage" (dict "secret" $secretName "key" "inter-broker-client-secret" "providedValues" (list "sasl.interbroker.clientSecret") "failOnNew" false "context" $) }} + {{- end }} + {{- end }} {{- if regexFind "SASL" (upper .Values.listeners.controller.protocol) }} + {{- if (include "kafka.saslUserPasswordsEnabled" .) 
}} controller-password: {{ include "common.secrets.passwords.manage" (dict "secret" $secretName "key" "controller-password" "providedValues" (list "sasl.controller.password") "failOnNew" false "context" $) }} {{- end }} + {{- if (include "kafka.saslClientSecretsEnabled" .) }} + controller-client-secret: {{ include "common.secrets.passwords.manage" (dict "secret" $secretName "key" "controller-client-secret" "providedValues" (list "sasl.controller.clientSecret") "failOnNew" false "context" $) }} + {{- end }} + {{- end }} {{- if .Values.serviceBindings.enabled }} {{- if (include "kafka.client.saslEnabled" .) }} diff --git a/charts/bitnami/kafka/values.yaml b/charts/bitnami/kafka/values.yaml index 562af4f30..6faaf4740 100644 --- a/charts/bitnami/kafka/values.yaml +++ b/charts/bitnami/kafka/values.yaml @@ -198,7 +198,7 @@ listeners: ## Kafka SASL settings for authentication, required if SASL_PLAINTEXT or SASL_SSL listeners are configured ## sasl: - ## @param sasl.enabledMechanisms Comma-separated list of allowed SASL mechanisms when SASL listeners are configured. Allowed types: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512` + ## @param sasl.enabledMechanisms Comma-separated list of allowed SASL mechanisms when SASL listeners are configured. Allowed types: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`, `OAUTHBEARER` ## NOTE: At the moment, Kafka Raft mode does not support SCRAM, that is why only PLAIN is configured. ## enabledMechanisms: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512 @@ -208,20 +208,39 @@ sasl: ## @param sasl.controllerMechanism SASL mechanism for controller communications. 
## controllerMechanism: PLAIN + ## Settings for oauthbearer mechanism + ## @param sasl.oauthbearer.tokenEndpointUrl The URL for the OAuth/OIDC identity provider + ## @param sasl.oauthbearer.jwksEndpointUrl The OAuth/OIDC provider URL from which the provider's JWKS (JSON Web Key Set) can be retrieved + ## @param sasl.oauthbearer.expectedAudience The comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences + ## @param sasl.oauthbearer.subClaimName The OAuth claim name for the subject. + ## + oauthbearer: + tokenEndpointUrl: "" + jwksEndpointUrl: "" + expectedAudience: "" + subClaimName: "sub" ## Credentials for inter-broker communications. ## @param sasl.interbroker.user Username for inter-broker communications when SASL is enabled ## @param sasl.interbroker.password Password for inter-broker communications when SASL is enabled. If not set and SASL is enabled for the controller listener, a random password will be generated. + ## @param sasl.interbroker.clientId Client ID for inter-broker communications when SASL is enabled with mechanism OAUTHBEARER + ## @param sasl.interbroker.clientSecret Client Secret for inter-broker communications when SASL is enabled with mechanism OAUTHBEARER. If not set and SASL is enabled for the controller listener, a random secret will be generated. ## interbroker: user: inter_broker_user password: "" + clientId: inter_broker_client + clientSecret: "" ## Credentials for controller communications. ## @param sasl.controller.user Username for controller communications when SASL is enabled ## @param sasl.controller.password Password for controller communications when SASL is enabled. If not set and SASL is enabled for the inter-broker listener, a random password will be generated. 
+ ## @param sasl.controller.clientId Client ID for controller communications when SASL is enabled with mechanism OAUTHBEARER + ## @param sasl.controller.clientSecret Client Secret for controller communications when SASL is enabled with mechanism OAUTHBEARER. If not set and SASL is enabled for the inter-broker listener, a random secret will be generated. ## controller: user: controller_user password: "" + clientId: controller_broker_client + clientSecret: "" ## Credentials for client communications. ## @param sasl.client.users Comma-separated list of usernames for client communications when SASL is enabled ## @param sasl.client.passwords Comma-separated list of passwords for client communications when SASL is enabled, must match the number of client.users @@ -239,8 +258,10 @@ sasl: password: "" ## @param sasl.existingSecret Name of the existing secret containing credentials for clientUsers, interBrokerUser, controllerUser and zookeeperUser ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create: - ## kubectl create secret generic SECRET_NAME --from-literal=client-passwords=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=controller-password=CONTROLLER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD - ## + ## kubectl create secret generic SECRET_NAME --from-literal=client-passwords=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=inter-broker-client-secret=INTER_BROKER_CLIENT_SECRET --from-literal=controller-password=CONTROLLER_PASSWORD --from-literal=controller-client-secret=CONTROLLER_CLIENT_SECRET --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD + ## The client secrets are only required when using oauthbearer as sasl mechanism. + ## Client, interbroker and controller passwords are only required if the sasl mechanism includes something other than oauthbearer. 
+ ## existingSecret: "" ## @section Kafka TLS parameters diff --git a/charts/bitnami/redis/Chart.lock b/charts/bitnami/redis/Chart.lock index ef686f92b..f5fb20a77 100644 --- a/charts/bitnami/redis/Chart.lock +++ b/charts/bitnami/redis/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.10.0 -digest: sha256:023ded170632d04528f30332370f34fc8fb96efb2886a01d934cb3bd6e6d2e09 -generated: "2023-09-05T11:35:55.621686+02:00" + version: 2.13.0 +digest: sha256:6b6084c51b6a028a651f6e8539d0197487ee807c5bae44867d4ea6ccd1f9ae93 +generated: "2023-09-29T11:06:04.261917+02:00" diff --git a/charts/bitnami/redis/Chart.yaml b/charts/bitnami/redis/Chart.yaml index e309370c5..d0c0bc94d 100644 --- a/charts/bitnami/redis/Chart.yaml +++ b/charts/bitnami/redis/Chart.yaml @@ -37,4 +37,4 @@ maintainers: name: redis sources: - https://github.com/bitnami/charts/tree/main/bitnami/redis -version: 18.1.1 +version: 18.1.2 diff --git a/charts/bitnami/redis/charts/common/Chart.yaml b/charts/bitnami/redis/charts/common/Chart.yaml index e35c2d0e7..4da7ec09e 100644 --- a/charts/bitnami/redis/charts/common/Chart.yaml +++ b/charts/bitnami/redis/charts/common/Chart.yaml @@ -2,7 +2,7 @@ annotations: category: Infrastructure licenses: Apache-2.0 apiVersion: v2 -appVersion: 2.9.2 +appVersion: 2.13.0 description: A Library Helm Chart for grouping common logic between bitnami charts. This chart is not deployable by itself. 
home: https://bitnami.com @@ -20,4 +20,4 @@ name: common sources: - https://github.com/bitnami/charts type: library -version: 2.10.0 +version: 2.13.0 diff --git a/charts/bitnami/redis/charts/common/templates/_capabilities.tpl b/charts/bitnami/redis/charts/common/templates/_capabilities.tpl index c6d115fe5..b1257397d 100644 --- a/charts/bitnami/redis/charts/common/templates/_capabilities.tpl +++ b/charts/bitnami/redis/charts/common/templates/_capabilities.tpl @@ -172,6 +172,50 @@ Return the appropriate apiVersion for Vertical Pod Autoscaler. {{- end -}} {{- end -}} +{{/* +Returns true if PodSecurityPolicy is supported +*/}} +{{- define "common.capabilities.psp.supported" -}} +{{- if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if AdmissionConfiguration is supported +*/}} +{{- define "common.capabilities.admisionConfiguration.supported" -}} +{{- if semverCompare ">=1.23-0" (include "common.capabilities.kubeVersion" .) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for AdmissionConfiguration. +*/}} +{{- define "common.capabilities.admisionConfiguration.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiserver.config.k8s.io/v1alpha1" -}} +{{- else if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiserver.config.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiserver.config.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityConfiguration. +*/}} +{{- define "common.capabilities.podSecurityConfiguration.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "pod-security.admission.config.k8s.io/v1alpha1" -}} +{{- else if semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "pod-security.admission.config.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "pod-security.admission.config.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + {{/* Returns true if the used Helm version is 3.3+. A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. diff --git a/charts/bitnami/redis/charts/common/templates/_images.tpl b/charts/bitnami/redis/charts/common/templates/_images.tpl index 2181f3224..e248d6d08 100644 --- a/charts/bitnami/redis/charts/common/templates/_images.tpl +++ b/charts/bitnami/redis/charts/common/templates/_images.tpl @@ -83,3 +83,19 @@ imagePullSecrets: {{- end }} {{- end }} {{- end -}} + +{{/* +Return the proper image version (ingores image revision/prerelease info & fallbacks to chart appVersion) +{{ include "common.images.version" ( dict "imageRoot" .Values.path.to.the.image "chart" .Chart ) }} +*/}} +{{- define "common.images.version" -}} +{{- $imageTag := .imageRoot.tag | toString -}} +{{/* regexp from https://github.com/Masterminds/semver/blob/23f51de38a0866c5ef0bfc42b3f735c73107b700/version.go#L41-L44 */}} +{{- if regexMatch `^([0-9]+)(\.[0-9]+)?(\.[0-9]+)?(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?$` $imageTag -}} + {{- $version := semver $imageTag -}} + {{- printf "%d.%d.%d" $version.Major $version.Minor $version.Patch -}} +{{- else -}} + {{- print .chart.AppVersion -}} +{{- end -}} +{{- end -}} + diff --git a/charts/bitnami/redis/charts/common/templates/_labels.tpl b/charts/bitnami/redis/charts/common/templates/_labels.tpl index fac46076a..fa3833fb8 100644 --- a/charts/bitnami/redis/charts/common/templates/_labels.tpl +++ b/charts/bitnami/redis/charts/common/templates/_labels.tpl @@ -11,21 +11,19 @@ Kubernetes standard labels */}} {{- define "common.labels.standard" -}} {{- if and (hasKey . "customLabels") (hasKey . 
"context") -}} -{{ merge - (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) - (dict - "app.kubernetes.io/name" (include "common.names.name" .context) - "helm.sh/chart" (include "common.names.chart" .context) - "app.kubernetes.io/instance" .context.Release.Name - "app.kubernetes.io/managed-by" .context.Release.Service - ) - | toYaml -}} +{{- $default := dict "app.kubernetes.io/name" (include "common.names.name" .context) "helm.sh/chart" (include "common.names.chart" .context) "app.kubernetes.io/instance" .context.Release.Name "app.kubernetes.io/managed-by" .context.Release.Service -}} +{{- with .context.Chart.AppVersion -}} +{{- $_ := set $default "app.kubernetes.io/version" . -}} +{{- end -}} +{{ template "common.tplvalues.merge" (dict "values" (list .customLabels $default) "context" .) }} {{- else -}} app.kubernetes.io/name: {{ include "common.names.name" . }} helm.sh/chart: {{ include "common.names.chart" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Chart.AppVersion }} +app.kubernetes.io/version: {{ . | quote }} +{{- end -}} {{- end -}} {{- end -}} @@ -40,14 +38,7 @@ overwrote them on metadata.labels fields. */}} {{- define "common.labels.matchLabels" -}} {{- if and (hasKey . "customLabels") (hasKey . 
"context") -}} -{{ merge - (pick (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) "app.kubernetes.io/name" "app.kubernetes.io/instance") - (dict - "app.kubernetes.io/name" (include "common.names.name" .context) - "app.kubernetes.io/instance" .context.Release.Name - ) - | toYaml -}} +{{ merge (pick (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) "app.kubernetes.io/name" "app.kubernetes.io/instance") (dict "app.kubernetes.io/name" (include "common.names.name" .context) "app.kubernetes.io/instance" .context.Release.Name ) | toYaml }} {{- else -}} app.kubernetes.io/name: {{ include "common.names.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/charts/bitnami/redis/charts/common/templates/_utils.tpl b/charts/bitnami/redis/charts/common/templates/_utils.tpl index c87040cd9..bfbddf054 100644 --- a/charts/bitnami/redis/charts/common/templates/_utils.tpl +++ b/charts/bitnami/redis/charts/common/templates/_utils.tpl @@ -65,3 +65,13 @@ Usage: {{- end -}} {{- printf "%s" $key -}} {{- end -}} + +{{/* +Checksum a template at "path" containing a *single* resource (ConfigMap,Secret) for use in pod annotations, excluding the metadata (see #18376). +Usage: +{{ include "common.utils.checksumTemplate" (dict "path" "/configmap.yaml" "context" $) }} +*/}} +{{- define "common.utils.checksumTemplate" -}} +{{- $obj := include (print .context.Template.BasePath .path) .context | fromYaml -}} +{{ omit $obj "apiVersion" "kind" "metadata" | toYaml | sha256sum }} +{{- end -}} diff --git a/charts/bitnami/redis/templates/master/psp.yaml b/charts/bitnami/redis/templates/master/psp.yaml index 0809c902a..368a2193b 100644 --- a/charts/bitnami/redis/templates/master/psp.yaml +++ b/charts/bitnami/redis/templates/master/psp.yaml @@ -3,8 +3,7 @@ Copyright VMware, Inc. 
SPDX-License-Identifier: APACHE-2.0 */}} -{{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} -{{- if and $pspAvailable .Values.podSecurityPolicy.create }} +{{- if and (include "common.capabilities.psp.supported" .) .Values.podSecurityPolicy.create }} apiVersion: policy/v1beta1 kind: PodSecurityPolicy metadata: diff --git a/charts/bitnami/redis/templates/role.yaml b/charts/bitnami/redis/templates/role.yaml index 914e500ff..be042294b 100644 --- a/charts/bitnami/redis/templates/role.yaml +++ b/charts/bitnami/redis/templates/role.yaml @@ -14,8 +14,7 @@ metadata: annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} rules: - {{- $pspAvailable := (semverCompare "<1.25-0" (include "common.capabilities.kubeVersion" .)) -}} - {{- if and $pspAvailable .Values.podSecurityPolicy.enabled }} + {{- if and (include "common.capabilities.psp.supported" .) .Values.podSecurityPolicy.enabled }} - apiGroups: - '{{ template "podSecurityPolicy.apiGroup" . }}' resources: diff --git a/charts/cockroach-labs/cockroachdb/Chart.yaml b/charts/cockroach-labs/cockroachdb/Chart.yaml index bc93f2ded..8ed09db6c 100644 --- a/charts/cockroach-labs/cockroachdb/Chart.yaml +++ b/charts/cockroach-labs/cockroachdb/Chart.yaml @@ -4,7 +4,7 @@ annotations: catalog.cattle.io/kube-version: '>=1.8-0' catalog.cattle.io/release-name: cockroachdb apiVersion: v1 -appVersion: 23.1.10 +appVersion: 23.1.11 description: CockroachDB is a scalable, survivable, strongly-consistent SQL database. 
home: https://www.cockroachlabs.com icon: https://raw.githubusercontent.com/cockroachdb/cockroach/master/docs/media/cockroach_db.png @@ -14,4 +14,4 @@ maintainers: name: cockroachdb sources: - https://github.com/cockroachdb/cockroach -version: 11.2.0 +version: 11.2.1 diff --git a/charts/cockroach-labs/cockroachdb/README.md b/charts/cockroach-labs/cockroachdb/README.md index b312a3243..3cdc61ead 100644 --- a/charts/cockroach-labs/cockroachdb/README.md +++ b/charts/cockroach-labs/cockroachdb/README.md @@ -229,10 +229,10 @@ kubectl get pods \ ``` ``` -my-release-cockroachdb-0 cockroachdb/cockroach:v23.1.10 -my-release-cockroachdb-1 cockroachdb/cockroach:v23.1.10 -my-release-cockroachdb-2 cockroachdb/cockroach:v23.1.10 -my-release-cockroachdb-3 cockroachdb/cockroach:v23.1.10 +my-release-cockroachdb-0 cockroachdb/cockroach:v23.1.11 +my-release-cockroachdb-1 cockroachdb/cockroach:v23.1.11 +my-release-cockroachdb-2 cockroachdb/cockroach:v23.1.11 +my-release-cockroachdb-3 cockroachdb/cockroach:v23.1.11 ``` Resume normal operations. Once you are comfortable that the stability and performance of the cluster is what you'd expect post-upgrade, finalize the upgrade: @@ -316,7 +316,7 @@ For details see the [`values.yaml`](values.yaml) file. 
| `conf.store.size` | CockroachDB storage size | `""` | | `conf.store.attrs` | CockroachDB storage attributes | `""` | | `image.repository` | Container image name | `cockroachdb/cockroach` | -| `image.tag` | Container image tag | `v23.1.10` | +| `image.tag` | Container image tag | `v23.1.11` | | `image.pullPolicy` | Container pull policy | `IfNotPresent` | | `image.credentials` | `registry`, `user` and `pass` credentials to pull private image | `{}` | | `statefulset.replicas` | StatefulSet replicas number | `3` | diff --git a/charts/cockroach-labs/cockroachdb/values.yaml b/charts/cockroach-labs/cockroachdb/values.yaml index e3381a480..a70c3afe9 100644 --- a/charts/cockroach-labs/cockroachdb/values.yaml +++ b/charts/cockroach-labs/cockroachdb/values.yaml @@ -7,7 +7,7 @@ fullnameOverride: "" image: repository: cockroachdb/cockroach - tag: v23.1.10 + tag: v23.1.11 pullPolicy: IfNotPresent credentials: {} # registry: docker.io diff --git a/charts/confluent/confluent-for-kubernetes/Chart.yaml b/charts/confluent/confluent-for-kubernetes/Chart.yaml index 2f9e78bc9..4b7d0c83b 100644 --- a/charts/confluent/confluent-for-kubernetes/Chart.yaml +++ b/charts/confluent/confluent-for-kubernetes/Chart.yaml @@ -19,4 +19,4 @@ maintainers: name: confluent-for-kubernetes sources: - https://docs.confluent.io/current/index.html -version: 0.824.2 +version: 0.824.14 diff --git a/charts/confluent/confluent-for-kubernetes/values.yaml b/charts/confluent/confluent-for-kubernetes/values.yaml index c9a4dd1f3..c5ce662da 100644 --- a/charts/confluent/confluent-for-kubernetes/values.yaml +++ b/charts/confluent/confluent-for-kubernetes/values.yaml @@ -81,7 +81,7 @@ image: registry: docker.io repository: confluentinc/confluent-operator pullPolicy: IfNotPresent - tag: "0.824.2" + tag: "0.824.14" ### ## Priority class for Confluent Operator pod diff --git a/charts/datadog/datadog-operator/CHANGELOG.md b/charts/datadog/datadog-operator/CHANGELOG.md index 2c8ffc87e..46e961871 100644 --- 
a/charts/datadog/datadog-operator/CHANGELOG.md +++ b/charts/datadog/datadog-operator/CHANGELOG.md @@ -1,5 +1,9 @@ # Changelog +## 1.1.2 + +* Add configuration for Operator flag `operatorMetricsEnabled` : this parameter can be used to disable the Operator metrics forwarder. It is enabled by default. + ## 1.1.1 * Add permissions to curl `/metrics/slis` to operator cluster role. diff --git a/charts/datadog/datadog-operator/Chart.yaml b/charts/datadog/datadog-operator/Chart.yaml index 486b30e56..3a7e2a57a 100644 --- a/charts/datadog/datadog-operator/Chart.yaml +++ b/charts/datadog/datadog-operator/Chart.yaml @@ -26,4 +26,4 @@ name: datadog-operator sources: - https://app.datadoghq.com/account/settings#agent/kubernetes - https://github.com/DataDog/datadog-agent -version: 1.1.1 +version: 1.1.2 diff --git a/charts/datadog/datadog-operator/README.md b/charts/datadog/datadog-operator/README.md index c24dab900..850df07f8 100644 --- a/charts/datadog/datadog-operator/README.md +++ b/charts/datadog/datadog-operator/README.md @@ -1,6 +1,6 @@ # Datadog Operator -![Version: 1.1.1](https://img.shields.io/badge/Version-1.1.1-informational?style=flat-square) ![AppVersion: 1.1.0](https://img.shields.io/badge/AppVersion-1.1.0-informational?style=flat-square) +![Version: 1.1.2](https://img.shields.io/badge/Version-1.1.2-informational?style=flat-square) ![AppVersion: 1.1.0](https://img.shields.io/badge/AppVersion-1.1.0-informational?style=flat-square) ## Values @@ -36,6 +36,7 @@ | metricsPort | int | `8383` | Port used for OpenMetrics endpoint | | nameOverride | string | `""` | Override name of app | | nodeSelector | object | `{}` | Allows to schedule Datadog Operator on specific nodes | +| operatorMetricsEnabled | string | `"true"` | Enable forwarding of Datadog Operator metrics and events to Datadog. 
| | podAnnotations | object | `{}` | Allows setting additional annotations for Datadog Operator PODs | | podLabels | object | `{}` | Allows setting additional labels for for Datadog Operator PODs | | rbac.create | bool | `true` | Specifies whether the RBAC resources should be created | diff --git a/charts/datadog/datadog-operator/templates/deployment.yaml b/charts/datadog/datadog-operator/templates/deployment.yaml index f696b57c8..b2ada1f84 100644 --- a/charts/datadog/datadog-operator/templates/deployment.yaml +++ b/charts/datadog/datadog-operator/templates/deployment.yaml @@ -93,6 +93,7 @@ spec: - "-logEncoder=json" - "-metrics-addr=:{{ .Values.metricsPort }}" - "-loglevel={{ .Values.logLevel }}" + - "-operatorMetricsEnabled={{ .Values.operatorMetricsEnabled }}" {{- if and (not (empty .Values.datadogCRDs.migration.datadogAgents.conversionWebhook.enabled)) (semverCompare ">=1.0.0-0" .Values.image.tag ) }} - "-webhookEnabled={{ .Values.datadogCRDs.migration.datadogAgents.conversionWebhook.enabled }}" {{- else }} diff --git a/charts/datadog/datadog-operator/values.yaml b/charts/datadog/datadog-operator/values.yaml index a1a4ad3e7..fd05da3d0 100644 --- a/charts/datadog/datadog-operator/values.yaml +++ b/charts/datadog/datadog-operator/values.yaml @@ -58,6 +58,8 @@ logLevel: "info" maximumGoroutines: # supportExtendedDaemonset -- If true, supports using ExtendedDaemonSet CRD supportExtendedDaemonset: "false" +# operatorMetricsEnabled -- Enable forwarding of Datadog Operator metrics and events to Datadog. 
+operatorMetricsEnabled: "true" # metricsPort -- Port used for OpenMetrics endpoint metricsPort: 8383 secretBackend: diff --git a/charts/datadog/datadog/CHANGELOG.md b/charts/datadog/datadog/CHANGELOG.md index 50a6ad3ef..0325844c9 100644 --- a/charts/datadog/datadog/CHANGELOG.md +++ b/charts/datadog/datadog/CHANGELOG.md @@ -1,5 +1,9 @@ # Datadog changelog +## 3.38.4 + +* Add `orchestrator_explorer.enabled` for the Agent + ## 3.38.3 * Update `fips.image.tag` to `0.6.0` diff --git a/charts/datadog/datadog/Chart.yaml b/charts/datadog/datadog/Chart.yaml index 625902af3..599dcb8d7 100644 --- a/charts/datadog/datadog/Chart.yaml +++ b/charts/datadog/datadog/Chart.yaml @@ -19,4 +19,4 @@ name: datadog sources: - https://app.datadoghq.com/account/settings#agent/kubernetes - https://github.com/DataDog/datadog-agent -version: 3.38.3 +version: 3.38.4 diff --git a/charts/datadog/datadog/README.md b/charts/datadog/datadog/README.md index 3d4cc8ad1..d97692b6c 100644 --- a/charts/datadog/datadog/README.md +++ b/charts/datadog/datadog/README.md @@ -1,6 +1,6 @@ # Datadog -![Version: 3.38.3](https://img.shields.io/badge/Version-3.38.3-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square) +![Version: 3.38.4](https://img.shields.io/badge/Version-3.38.4-informational?style=flat-square) ![AppVersion: 7](https://img.shields.io/badge/AppVersion-7-informational?style=flat-square) [Datadog](https://www.datadoghq.com/) is a hosted infrastructure monitoring platform. This chart adds the Datadog Agent to all nodes in your cluster via a DaemonSet. It also optionally depends on the [kube-state-metrics chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics). For more information about monitoring Kubernetes with Datadog, please refer to the [Datadog documentation website](https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/). 
diff --git a/charts/datadog/datadog/templates/_container-agent.yaml b/charts/datadog/datadog/templates/_container-agent.yaml index 1e199823f..9c8576808 100644 --- a/charts/datadog/datadog/templates/_container-agent.yaml +++ b/charts/datadog/datadog/templates/_container-agent.yaml @@ -148,6 +148,8 @@ - name: DD_CHECKS_TAG_CARDINALITY value: {{ .Values.datadog.checksCardinality | quote }} {{- end }} + - name: DD_ORCHESTRATOR_EXPLORER_ENABLED + value: {{ (include "should-enable-k8s-resource-monitoring" .) | quote }} - name: DD_EXPVAR_PORT value: {{ .Values.datadog.expvarPort | quote }} - name: DD_COMPLIANCE_CONFIG_ENABLED diff --git a/charts/digitalis/vals-operator/Chart.yaml b/charts/digitalis/vals-operator/Chart.yaml index 5d900567e..8695c743f 100644 --- a/charts/digitalis/vals-operator/Chart.yaml +++ b/charts/digitalis/vals-operator/Chart.yaml @@ -4,7 +4,7 @@ annotations: catalog.cattle.io/kube-version: '>= 1.19.0-0' catalog.cattle.io/release-name: vals-operator apiVersion: v2 -appVersion: v0.7.6 +appVersion: v0.7.7 description: 'This helm chart installs the Digitalis Vals Operator to manage and sync secrets from supported backends into Kubernetes. 
## About Vals-Operator Here at [Digitalis](https://digitalis.io) we love [vals](https://github.com/helmfile/vals), @@ -20,4 +20,4 @@ maintainers: name: Digitalis.IO name: vals-operator type: application -version: 0.7.6 +version: 0.7.7 diff --git a/charts/digitalis/vals-operator/README.md b/charts/digitalis/vals-operator/README.md index 2b7c9bacf..1338b762d 100644 --- a/charts/digitalis/vals-operator/README.md +++ b/charts/digitalis/vals-operator/README.md @@ -1,6 +1,6 @@ # vals-operator -![Version: 0.7.5](https://img.shields.io/badge/Version-0.7.5-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.7.5](https://img.shields.io/badge/AppVersion-v0.7.5-informational?style=flat-square) +![Version: 0.7.7](https://img.shields.io/badge/Version-0.7.7-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: v0.7.7](https://img.shields.io/badge/AppVersion-v0.7.7-informational?style=flat-square) This helm chart installs the Digitalis Vals Operator to manage and sync secrets from supported backends into Kubernetes. 
## About Vals-Operator diff --git a/charts/jfrog/artifactory-ha/CHANGELOG.md b/charts/jfrog/artifactory-ha/CHANGELOG.md index 7123f652a..d96bb987c 100644 --- a/charts/jfrog/artifactory-ha/CHANGELOG.md +++ b/charts/jfrog/artifactory-ha/CHANGELOG.md @@ -1,7 +1,7 @@ # JFrog Artifactory-ha Chart Changelog All changes to this chart will be documented in this file -## [107.68.11] - Sep 20, 2023 +## [107.68.13] - Sep 20, 2023 * Fixed rtfs context * Fixed - Metadata service does not respect customVolumeMounts for DB CAs [GH-1815](https://github.com/jfrog/charts/issues/1815) diff --git a/charts/jfrog/artifactory-ha/Chart.yaml b/charts/jfrog/artifactory-ha/Chart.yaml index 97fc28231..23497a806 100644 --- a/charts/jfrog/artifactory-ha/Chart.yaml +++ b/charts/jfrog/artifactory-ha/Chart.yaml @@ -4,7 +4,7 @@ annotations: catalog.cattle.io/kube-version: '>= 1.14.0-0' catalog.cattle.io/release-name: artifactory-ha apiVersion: v2 -appVersion: 7.68.11 +appVersion: 7.68.13 dependencies: - condition: postgresql.enabled name: postgresql @@ -26,4 +26,4 @@ name: artifactory-ha sources: - https://github.com/jfrog/charts type: application -version: 107.68.11 +version: 107.68.13 diff --git a/charts/jfrog/artifactory-jcr/CHANGELOG.md b/charts/jfrog/artifactory-jcr/CHANGELOG.md index a63810854..1a6ffbb57 100644 --- a/charts/jfrog/artifactory-jcr/CHANGELOG.md +++ b/charts/jfrog/artifactory-jcr/CHANGELOG.md @@ -1,7 +1,7 @@ # JFrog Container Registry Chart Changelog All changes to this chart will be documented in this file. 
-## [107.68.11] - Jul 20, 2023 +## [107.68.13] - Jul 20, 2023 * Disabled federation services when splitServicesToContainers=true ## [107.45.0] - Aug 25, 2022 diff --git a/charts/jfrog/artifactory-jcr/Chart.yaml b/charts/jfrog/artifactory-jcr/Chart.yaml index a5c52770d..ba815659d 100644 --- a/charts/jfrog/artifactory-jcr/Chart.yaml +++ b/charts/jfrog/artifactory-jcr/Chart.yaml @@ -4,11 +4,11 @@ annotations: catalog.cattle.io/kube-version: '>= 1.14.0-0' catalog.cattle.io/release-name: artifactory-jcr apiVersion: v2 -appVersion: 7.68.11 +appVersion: 7.68.13 dependencies: - name: artifactory repository: file://./charts/artifactory - version: 107.68.11 + version: 107.68.13 description: JFrog Container Registry home: https://jfrog.com/container-registry/ icon: https://raw.githubusercontent.com/jfrog/charts/ea5c3112c24a973f64f3ccd99747323db292a369/stable/artifactory-jcr/logo/jcr-logo.png @@ -27,4 +27,4 @@ name: artifactory-jcr sources: - https://github.com/jfrog/charts type: application -version: 107.68.11 +version: 107.68.13 diff --git a/charts/jfrog/artifactory-jcr/charts/artifactory/CHANGELOG.md b/charts/jfrog/artifactory-jcr/charts/artifactory/CHANGELOG.md index e78c9dfad..0c875b043 100644 --- a/charts/jfrog/artifactory-jcr/charts/artifactory/CHANGELOG.md +++ b/charts/jfrog/artifactory-jcr/charts/artifactory/CHANGELOG.md @@ -1,7 +1,7 @@ # JFrog Artifactory Chart Changelog All changes to this chart will be documented in this file. 
-## [107.68.11] - Sep 20, 2023 +## [107.68.13] - Sep 20, 2023 * Fixed rtfs context * Fixed - Metadata service does not respect customVolumeMounts for DB CAs [GH-1815](https://github.com/jfrog/charts/issues/1815) diff --git a/charts/jfrog/artifactory-jcr/charts/artifactory/Chart.yaml b/charts/jfrog/artifactory-jcr/charts/artifactory/Chart.yaml index 62d16e900..e4b1e81dd 100644 --- a/charts/jfrog/artifactory-jcr/charts/artifactory/Chart.yaml +++ b/charts/jfrog/artifactory-jcr/charts/artifactory/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 7.68.11 +appVersion: 7.68.13 dependencies: - condition: postgresql.enabled name: postgresql @@ -21,4 +21,4 @@ name: artifactory sources: - https://github.com/jfrog/charts type: application -version: 107.68.11 +version: 107.68.13 diff --git a/charts/kong/kong/CHANGELOG.md b/charts/kong/kong/CHANGELOG.md index 06e6e8289..68ba596f7 100644 --- a/charts/kong/kong/CHANGELOG.md +++ b/charts/kong/kong/CHANGELOG.md @@ -4,6 +4,14 @@ Nothing yet. +## 2.28.1 + +### Fixed + +* The admission webhook now includes Gateway API resources and Ingress + resources for controller versions 2.12+. This version introduces new + validations for Kong's regex path implementation. 
+ ## 2.28.0 ### Improvements diff --git a/charts/kong/kong/Chart.yaml b/charts/kong/kong/Chart.yaml index b92c271e3..c3629f5a3 100644 --- a/charts/kong/kong/Chart.yaml +++ b/charts/kong/kong/Chart.yaml @@ -20,4 +20,4 @@ maintainers: name: kong sources: - https://github.com/Kong/charts/tree/main/charts/kong -version: 2.28.0 +version: 2.28.1 diff --git a/charts/kong/kong/templates/admission-webhook.yaml b/charts/kong/kong/templates/admission-webhook.yaml index f887ccf77..f306acd2f 100644 --- a/charts/kong/kong/templates/admission-webhook.yaml +++ b/charts/kong/kong/templates/admission-webhook.yaml @@ -80,6 +80,28 @@ webhooks: - UPDATE resources: - secrets +{{- if (semverCompare ">= 2.12.0" (include "kong.effectiveVersion" .Values.ingressController.image)) }} + - apiGroups: + - networking.k8s.io + apiVersions: + - 'v1' + operations: + - CREATE + - UPDATE + resources: + - ingresses + - apiGroups: + - gateway.networking.k8s.io + apiVersions: + - 'v1alpha2' + - 'v1beta1' + operations: + - CREATE + - UPDATE + resources: + - gateways + - httproutes +{{- end }} clientConfig: {{- if not .Values.ingressController.admissionWebhook.certificate.provided }} caBundle: {{ b64enc $caCert }} diff --git a/charts/ngrok/kubernetes-ingress-controller/.helmignore b/charts/ngrok/kubernetes-ingress-controller/.helmignore new file mode 100644 index 000000000..faa119839 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/.helmignore @@ -0,0 +1,25 @@ +# Source: https://github.com/helm/helm/blob/main/pkg/repo/repotest/testdata/examplechart/.helmignore +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj + +# helmtest plugin tests +tests diff --git a/charts/ngrok/kubernetes-ingress-controller/CHANGELOG.md b/charts/ngrok/kubernetes-ingress-controller/CHANGELOG.md new file mode 100644 index 000000000..bf8934b5b --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/CHANGELOG.md @@ -0,0 +1,106 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## 0.11.0 + +** Important ** This version of the controller changes the ownership model for https edge and tunnel CRs. To ease out the transition to the new ownership, make sure to run `migrate-edges.sh` and `migrate-tunnels.sh` scripts before installing the new version. + +### Changed +- Specify IPPolicyRule action as an enum of (allow,deny) as part of [#260](https://github.com/ngrok/kubernetes-ingress-controller/pull/260) +- Handle special case for changing auth types that causes an error during state transition [#259](https://github.com/ngrok/kubernetes-ingress-controller/pull/259) +- Better handling when changing pathType between 'Exact' and 'Prefix' [#262](https://github.com/ngrok/kubernetes-ingress-controller/pull/262) +- Update ngrok-go to 1.4.0 [#298](https://github.com/ngrok/kubernetes-ingress-controller/pull/298) +- Tunnels are now unique in their respective namespace, not across the cluster [#281](https://github.com/ngrok/kubernetes-ingress-controller/pull/281) +- The CRs that ingress controller creates are uniquely marked and managed by it. 
Other CRs created manually are no longer deleted when the ingress controller is not using them [#267](https://github.com/ngrok/kubernetes-ingress-controller/issues/267); fixed for tunnel in [#285](https://github.com/ngrok/kubernetes-ingress-controller/pull/285) and for https edges in [#286](https://github.com/ngrok/kubernetes-ingress-controller/pull/286) +- Better error handling and retry, specifically for the case where we try to create an https edge for a domain which is not created yet [#283](https://github.com/ngrok/kubernetes-ingress-controller/issues/283); fixed in [#288](https://github.com/ngrok/kubernetes-ingress-controller/pull/288) +- Watch and apply ngrok module set CR changes [#287](https://github.com/ngrok/kubernetes-ingress-controller/issues/287); fixed in [#290](https://github.com/ngrok/kubernetes-ingress-controller/pull/290) +- Label https edges and tunnels with service UID to make them more unique within ngrok [#291](https://github.com/ngrok/kubernetes-ingress-controller/issues/291); fixed in [#293](https://github.com/ngrok/kubernetes-ingress-controller/pull/293) and [#302](https://github.com/ngrok/kubernetes-ingress-controller/pull/302) + +### Added +- Add support for configuring pod affinities, pod disruption budget, and priorityClassName [#258](https://github.com/ngrok/kubernetes-ingress-controller/pull/258) +- The controller stopping at the first resource create [#270](https://github.com/ngrok/kubernetes-ingress-controller/pull/270) +- Using `make deploy` now requires `NGROK_AUTHTOKEN` and `NGROK_API_KEY` to be set [#292](https://github.com/ngrok/kubernetes-ingress-controller/pull/292) + +## 0.10.0 + +### Added +- Support HTTPS backends via service annotation [#238](https://github.com/ngrok/kubernetes-ingress-controller/pull/238) + +### Changed +- Normalize all ngrok `.io` TLD to `.app` TLD [#240](https://github.com/ngrok/kubernetes-ingress-controller/pull/240) +- Chart Icon + +### Fixed +- Add namespace to secret 
[#244](https://github.com/ngrok/kubernetes-ingress-controller/pull/244). Thank you for the contribution, @vincetse! + +## 0.9.0 +### Added +- Add a 'podLabels' option to the helm chart [#212](https://github.com/ngrok/kubernetes-ingress-controller/pull/212). +- Permission to `get`,`list`, and `watch` `services` [#222](https://github.com/ngrok/kubernetes-ingress-controller/pull/222). + +## 0.8.0 +### Changed +- Log Level configuration to helm chart [#199](https://github.com/ngrok/kubernetes-ingress-controller/pull/199). +- Bump default controller image to use `0.6.0` release [#204](https://github.com/ngrok/kubernetes-ingress-controller/pull/204). + +### Fixed +- update default-container annotation so logs work correctly [#197](https://github.com/ngrok/kubernetes-ingress-controller/pull/197) + +## 0.7.0 + +### Added +- Update `NgrokModuleSet` and `HTTPSEdge` CRD to support SAML and OAuth + +### Changed +- Update appVersion to `0.5.0` to match the latest release of the controller. + +## 0.6.1 +### Fixed +- Default the image tag to the chart's `appVersion` for predictable installs. Previously, the helm chart would default to the `latest` image tag which can have breaking changes, notably with CRDs. 
+ +## 0.6.0 +### Changed +- Ingress Class has Default set to false [#109](https://github.com/ngrok/kubernetes-ingress-controller/pull/109) + +### Added +- Allow controller name to be configured to support multiple ngrok ingress classes [#159](https://github.com/ngrok/kubernetes-ingress-controller/pull/159) +- Allow the controller to be configured to only watch a single namespace [#157](https://github.com/ngrok/kubernetes-ingress-controller/pull/157) +- Pass key/value pairs to helm that get added as json string metadata in ngrok api resources [#156](https://github.com/ngrok/kubernetes-ingress-controller/pull/156) +- Add IP Policy CRD and IP Policy Route Module [#120](https://github.com/ngrok/kubernetes-ingress-controller/pull/120) +- Load certs from the directory `"/etc/ssl/certs/ngrok/"` for ngrok-go if present [#111](https://github.com/ngrok/kubernetes-ingress-controller/pull/111) + +## 0.5.0 +### Changed +- Renamed chart from `ngrok-ingress-controller` to `kubernetes-ingress-controller`. +- Added CRDs for `domains`, `tcpedges`, and `httpsedges`. + +## 0.4.0 +### Added +- `serverAddr` flag to override the ngrok tunnel server address +- `extraVolumes` to add an arbitrary set of volumes to the controller pod +- `extraVolumeMounts` to add an arbitrary set of volume mounts to the controller container + +## 0.3.1 +### Fixed +- Fixes rendering of `NOTES.txt` when installing via helm + +## 0.3.0 +### Changed + +- Moved from calling ngrok-agent sidecar to using the ngrok-go library in the controller process. +- Moved `apiKey` and `authtoken` to `credentials.apiKey` and `credentials.authtoken` respectively. +- `credentialSecrets.name` is now `credentials.secret.name` +- Changed replicas to 1 by default to work better for default/demo setup. 
+ +## 0.2.0 +### Added + +- Support for different values commonly found in helm charts + +# 0.1.0 + +TODO diff --git a/charts/ngrok/kubernetes-ingress-controller/Chart.lock b/charts/ngrok/kubernetes-ingress-controller/Chart.lock new file mode 100644 index 000000000..eeddc5e19 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 2.10.1 +digest: sha256:54cb57fbf004b3cf03fe382619b87c9d17469340f3d24f506a2dbec185a9455a +generated: "2023-09-08T12:48:02.907551-04:00" diff --git a/charts/ngrok/kubernetes-ingress-controller/Chart.yaml b/charts/ngrok/kubernetes-ingress-controller/Chart.yaml new file mode 100644 index 000000000..b8b1539b5 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/Chart.yaml @@ -0,0 +1,25 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: ngrok Ingress Controller + catalog.cattle.io/release-name: kubernetes-ingress-controller +apiVersion: v2 +appVersion: 0.9.0 +dependencies: +- name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x +description: A Kubernetes ingress controller built using ngrok. 
+home: https://ngrok.com +icon: https://assets-global.website-files.com/63ed4bc7a4b189da942a6b8c/6411ffa0b395a44345ed2b1a_Frame%201.svg +keywords: +- ngrok +- networking +- ingress +- edge +- api gateway +name: kubernetes-ingress-controller +sources: +- https://github.com/ngrok/kubernetes-ingress-controller +version: 0.11.0 diff --git a/charts/ngrok/kubernetes-ingress-controller/README.md b/charts/ngrok/kubernetes-ingress-controller/README.md new file mode 100644 index 000000000..c44eb8eb3 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/README.md @@ -0,0 +1,90 @@ +# ngrok Ingress Controller + +This is the helm chart to install the ngrok ingress controller + +# Usage + +## Prerequisites + +The cluster Must be setup with a secret named `ngrok-ingress-controller-credentials` with the following keys: +* AUTHTOKEN +* API\_KEY + +## Install the controller with helm + +[Helm](https://helm.sh) must be installed to use the charts. Please refer to +Helm's [documentation](https://helm.sh/docs) to get started. + +Once Helm has been set up correctly, add the repo as follows: + +`helm repo add ngrok https://ngrok.github.io/kubernetes-ingress-controller` + +If you had already added this repo earlier, run `helm repo update` to retrieve +the latest versions of the packages. You can then run `helm search repo ngrok` to see the charts. 
+ +To install the ngrok-ingress-controller chart: + +`helm install my-ngrok-ingress-controller ngrok/kubernetes-ingress-controller` + +To uninstall the chart: + +`helm delete my-ngrok-ingress-controller` + + +## Parameters + +### Common parameters + +| Name | Description | Value | +| ------------------- | ----------------------------------------------------- | ----- | +| `nameOverride` | String to partially override generated resource names | `""` | +| `fullnameOverride` | String to fully override generated resource names | `""` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | + + +### Controller parameters + +| Name | Description | Value | +| ------------------------------------ | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------- | +| `podAnnotations` | Used to apply custom annotations to the ingress pods. | `{}` | +| `podLabels` | Used to apply custom labels to the ingress pods. | `{}` | +| `replicaCount` | The number of controllers to run. | `1` | +| `image.registry` | The ngrok ingress controller image registry. | `docker.io` | +| `image.repository` | The ngrok ingress controller image repository. | `ngrok/kubernetes-ingress-controller` | +| `image.tag` | The ngrok ingress controller image tag. Defaults to the chart's appVersion if not specified | `""` | +| `image.pullPolicy` | The ngrok ingress controller image pull policy. | `IfNotPresent` | +| `image.pullSecrets` | An array of imagePullSecrets to be used when pulling the image. | `[]` | +| `ingressClass.name` | The name of the ingress class to use. | `ngrok` | +| `ingressClass.create` | Whether to create the ingress class. | `true` | +| `ingressClass.default` | Whether to set the ingress class as default. 
| `false` | +| `controllerName` | The name of the controller to look for matching ingress classes | `k8s.ngrok.com/ingress-controller` | +| `watchNamespace` | The namespace to watch for ingress resources. Defaults to all | `""` | +| `credentials.secret.name` | The name of the secret the credentials are in. If not provided, one will be generated using the helm release name. | `""` | +| `credentials.apiKey` | Your ngrok API key. If provided, it will be will be written to the secret and the authtoken must be provided as well. | `""` | +| `credentials.authtoken` | Your ngrok authtoken. If provided, it will be will be written to the secret and the apiKey must be provided as well. | `""` | +| `region` | ngrok region to create tunnels in. Defaults to connect to the closest geographical region. | `""` | +| `serverAddr` | This is the URL of the ngrok server to connect to. You should set this if you are using a custom ingress URL. | `""` | +| `metaData` | This is a map of key/value pairs that will be added as meta data to all ngrok api resources created | `{}` | +| `affinity` | Affinity for the controller pod assignment | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. 
| `[]` | +| `priorityClassName` | Priority class for pod scheduling | `""` | +| `podDisruptionBudget.create` | Enable a Pod Disruption Budget creation | `false` | +| `podDisruptionBudget.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `""` | +| `podDisruptionBudget.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `1` | +| `resources.limits` | The resources limits for the container | `{}` | +| `resources.requests` | The requested resources for the container | `{}` | +| `extraVolumes` | An array of extra volumes to add to the controller. | `[]` | +| `extraVolumeMounts` | An array of extra volume mounts to add to the controller. | `[]` | +| `extraEnv` | an object of extra environment variables to add to the controller. | `{}` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | The name of the ServiceAccount to use. | `""` | +| `serviceAccount.annotations` | Additional annotations to add to the ServiceAccount | `{}` | +| `log.level` | The level to log at. One of 'debug', 'info', or 'error'. | `info` | +| `log.stacktraceLevel` | The level to report stacktrace logs one of 'info' or 'error'. | `error` | +| `log.format` | The log format to use. One of console, json. | `json` | + diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/.helmignore b/charts/ngrok/kubernetes-ingress-controller/charts/common/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/Chart.yaml b/charts/ngrok/kubernetes-ingress-controller/charts/common/Chart.yaml new file mode 100644 index 000000000..fe30b054e --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure + licenses: Apache-2.0 +apiVersion: v2 +appVersion: 2.10.1 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://bitnami.com +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- name: VMware, Inc. + url: https://github.com/bitnami/charts +name: common +sources: +- https://github.com/bitnami/charts +type: library +version: 2.10.1 diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/README.md b/charts/ngrok/kubernetes-ingress-controller/charts/common/README.md new file mode 100644 index 000000000..fe6a01000 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/README.md @@ -0,0 +1,235 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between Bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 2.x.x + repository: oci://registry-1.docker.io/bitnamicharts +``` + +```console +helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. 
+ +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +Looking to use our applications in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ + +## Parameters + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. 
+ example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. 
To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +#### What changes were introduced in this major version? + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. 
+- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +#### Considerations when upgrading to this version + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +#### Useful links + +- +- +- + +## License + +Copyright © 2023 VMware, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_affinities.tpl b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_affinities.tpl new file mode 100644 index 000000000..e85b1df45 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_affinities.tpl @@ -0,0 +1,139 @@ +{{/* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a topologyKey definition +{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}} +*/}} +{{- define "common.affinities.topologyKey" -}} +{{ .topologyKey | default "kubernetes.io/hostname" -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $customLabels := default (dict) .customLabels -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: 1 + {{- range $extraPodAffinityTerms }} + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: {{ .weight | default 1 -}} + {{- end -}} +{{- end -}} + +{{/* +Return a hard 
podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "customLabels" .Values.podLabels "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "extraPodAffinityTerms" .Values.extraPodAffinityTerms "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $customLabels := default (dict) .customLabels -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +{{- $extraPodAffinityTerms := default (list) .extraPodAffinityTerms -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" .context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- range $extraPodAffinityTerms }} + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" ( dict "customLabels" $customLabels "context" $.context )) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := .extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + {{- end -}} +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . 
-}} + {{- end -}} +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_capabilities.tpl b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_capabilities.tpl new file mode 100644 index 000000000..c6d115fe5 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_capabilities.tpl @@ -0,0 +1,185 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for daemonset. 
+*/}} +{{- define "common.capabilities.daemonset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for APIService. +*/}} +{{- define "common.capabilities.apiService.apiVersion" -}} +{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiregistration.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiregistration.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "common.capabilities.hpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for Vertical Pod Autoscaler. +*/}} +{{- define "common.capabilities.vpa.apiVersion" -}} +{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}} +{{- if .beta2 -}} +{{- print "autoscaling/v2beta2" -}} +{{- else -}} +{{- print "autoscaling/v2beta1" -}} +{{- end -}} +{{- else -}} +{{- print "autoscaling/v2" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. 
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. +**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_errors.tpl b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_errors.tpl new file mode 100644 index 000000000..07ded6f64 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_errors.tpl @@ -0,0 +1,28 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." 
-}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_images.tpl b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_images.tpl new file mode 100644 index 000000000..2181f3224 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_images.tpl @@ -0,0 +1,85 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" .Values.global ) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- if $registryName }} + {{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- else -}} + {{- printf "%s%s%s" $repositoryName $separator $termination -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, 
.Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_ingress.tpl b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_ingress.tpl new file mode 100644 index 000000000..efa5b85c7 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_ingress.tpl @@ -0,0 +1,73 @@ +{{/* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) 
-}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_labels.tpl b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_labels.tpl new file mode 100644 index 000000000..a0534f7f9 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_labels.tpl @@ -0,0 +1,39 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Kubernetes standard labels +{{ include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" $) -}} +*/}} +{{- define "common.labels.standard" -}} +{{- if and (hasKey . "customLabels") (hasKey . "context") -}} +{{ merge (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) (dict "app.kubernetes.io/name" (include "common.names.name" .context) "helm.sh/chart" (include "common.names.chart" .context) "app.kubernetes.io/instance" .context.Release.Name "app.kubernetes.io/managed-by" .context.Release.Service) | toYaml }} +{{- else -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} +{{- end -}} + +{{/* +Labels used on immutable fields such as deploy.spec.selector.matchLabels or svc.spec.selector +{{ include "common.labels.matchLabels" (dict "customLabels" .Values.podLabels "context" $) -}} + +We don't want to loop over custom labels appending them to the selector +since it's very likely that it will break deployments, services, etc. +However, it's important to overwrite the standard labels if the user +overwrote them on metadata.labels fields. +*/}} +{{- define "common.labels.matchLabels" -}} +{{- if and (hasKey . "customLabels") (hasKey . "context") -}} +{{ merge (pick (include "common.tplvalues.render" (dict "value" .customLabels "context" .context) | fromYaml) "app.kubernetes.io/name" "app.kubernetes.io/instance") (dict "app.kubernetes.io/name" (include "common.names.name" .context) "app.kubernetes.io/instance" .context.Release.Name ) | toYaml }} +{{- else -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_names.tpl b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_names.tpl new file mode 100644 index 000000000..a222924f1 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_names.tpl @@ -0,0 +1,71 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. 
+*/}} +{{- define "common.names.namespace" -}} +{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_secrets.tpl b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_secrets.tpl new file mode 100644 index 000000000..a193c46b6 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_secrets.tpl @@ -0,0 +1,172 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . 
-}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + - failOnNew - Boolean - Optional - Default to true. 
If set to false, skip errors adding new keys to existing secrets. +The order in which this function returns a secret password: + 1. Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $failOnNew := default true .failOnNew }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key | quote }} + {{- else if $failOnNew }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key 
"subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Reuses the value from an existing secret, otherwise sets its value to a default value. + +Usage: +{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - defaultValue - String - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - context - Context - Required - Parent context. 
+ +*/}} +{{- define "common.secrets.lookup" -}} +{{- $value := "" -}} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}} +{{- if and $secretData (hasKey $secretData .key) -}} + {{- $value = index $secretData .key -}} +{{- else if .defaultValue -}} + {{- $value = .defaultValue | toString | b64enc -}} +{{- end -}} +{{- if $value -}} +{{- printf "%s" $value -}} +{{- end -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_storage.tpl b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_storage.tpl new file mode 100644 index 000000000..16405a0f8 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_storage.tpl @@ -0,0 +1,28 @@ +{{/* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_tplvalues.tpl b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_tplvalues.tpl new file mode 100644 index 000000000..a8ed7637e --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,38 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template perhaps with scope if the scope is present. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ ) }} +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $ "scope" $app ) }} +*/}} +{{- define "common.tplvalues.render" -}} +{{- $value := typeIs "string" .value | ternary .value (.value | toYaml) }} +{{- if contains "{{" (toJson .value) }} + {{- if .scope }} + {{- tpl (cat "{{- with $.RelativeScope -}}" $value "{{- end }}") (merge (dict "RelativeScope" .scope) .context) }} + {{- else }} + {{- tpl $value .context }} + {{- end }} +{{- else }} + {{- $value }} +{{- end }} +{{- end -}} + +{{/* +Merge a list of values that contains template after rendering them. 
+Merge precedence is consistent with http://masterminds.github.io/sprig/dicts.html#merge-mustmerge +Usage: +{{ include "common.tplvalues.merge" ( dict "values" (list .Values.path.to.the.Value1 .Values.path.to.the.Value2) "context" $ ) }} +*/}} +{{- define "common.tplvalues.merge" -}} +{{- $dst := dict -}} +{{- range .values -}} +{{- $dst = include "common.tplvalues.render" (dict "value" . "context" $.context "scope" $.scope) | fromYaml | merge $dst -}} +{{- end -}} +{{ $dst | toYaml }} +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_utils.tpl b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_utils.tpl new file mode 100644 index 000000000..c87040cd9 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_utils.tpl @@ -0,0 +1,67 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . 
) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_warnings.tpl b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_warnings.tpl new file mode 100644 index 000000000..66dffc1fe --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/_warnings.tpl @@ -0,0 +1,19 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. 
+Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_cassandra.tpl b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 000000000..eda9aada5 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,77 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . 
-}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. 
Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_mariadb.tpl b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 000000000..17d83a2fd --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,108 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_mongodb.tpl b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 000000000..bbb445b86 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,113 @@ +{{/* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername 
$valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_mysql.tpl b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 000000000..ca3953f86 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,108 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. 
+ +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_postgresql.tpl b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 000000000..8c9aa570e --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,134 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. 
Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. 
Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_redis.tpl b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_redis.tpl new file mode 100644 index 000000000..fc0d208dd --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,81 @@ +{{/* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . }} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" 
.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart's includes the standarizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_validations.tpl b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_validations.tpl new file mode 100644 index 000000000..31ceda871 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,51 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. 
+ +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." 
.subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/charts/common/values.yaml b/charts/ngrok/kubernetes-ingress-controller/charts/common/values.yaml new file mode 100644 index 000000000..9abe0e154 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/charts/common/values.yaml @@ -0,0 +1,8 @@ +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +## bitnami/common +## It is required by CI/CD tools and processes. +## @skip exampleValue +## +exampleValue: common-chart diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/NOTES.txt b/charts/ngrok/kubernetes-ingress-controller/templates/NOTES.txt new file mode 100644 index 000000000..2de4debc3 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/NOTES.txt @@ -0,0 +1,53 @@ +================================================================================ +The ngrok Ingress controller has been deployed as a Deployment type to your +cluster. + +If you haven't yet, create some Ingress resources in your cluster and they will +be automatically configured on the internet using ngrok. 
+ + +{{- $found := false }} +{{- range $svcIndex, $service := (lookup "v1" "Service" "" "").items }} + {{- if not $found }} + {{- range $portMapIdx, $portMap := $service.spec.ports }} + {{- if eq $portMap.port 80 443 }} + {{- if ne $service.metadata.name "kubernetes" }} + {{- $found = true -}} + {{- $randomStr := randAlphaNum 8 }} + +One example, taken from your cluster, is the Service: + {{ $service.metadata.name | quote }} + +You can make this accessible via Ngrok with the following manifest: +-------------------------------------------------------------------------------- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: {{ $service.metadata.name }} + namespace: {{ $service.metadata.namespace }} +spec: + ingressClassName: ngrok + rules: + - host: {{ $service.metadata.name -}}-{{- $randomStr -}}.ngrok.app + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: {{ $service.metadata.name }} + port: + number: {{ $portMap.port }} +-------------------------------------------------------------------------------- +Applying this manifest will make the service {{ $service.metadata.name | quote }} +available on the public internet at "https://{{ $service.metadata.name -}}-{{- $randomStr -}}.ngrok.app/". + {{- end }} + {{- end }} + {{- end }} + {{- end}} +{{- end }} + +Once done, view your edges in the Dashboard https://dashboard.ngrok.com/cloud-edge/edges +Find the tunnels running in your cluster here https://dashboard.ngrok.com/tunnels/agents + +If you have any questions or feedback, please join us in https://ngrok.com/slack and let us know! diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/_helpers.tpl b/charts/ngrok/kubernetes-ingress-controller/templates/_helpers.tpl new file mode 100644 index 000000000..c5be41cb2 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/_helpers.tpl @@ -0,0 +1,87 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}} +{{- define "kubernetes-ingress-controller.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kubernetes-ingress-controller.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "kubernetes-ingress-controller.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default name for the credentials secret name using the helm release +*/}} +{{- define "kubernetes-ingress-controller.credentialsSecretName" -}} +{{- if .Values.credentials.secret.name -}} +{{- .Values.credentials.secret.name -}} +{{- else -}} +{{- printf "%s-credentials" (include "kubernetes-ingress-controller.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "kubernetes-ingress-controller.labels" -}} +helm.sh/chart: {{ include "kubernetes-ingress-controller.chart" . }} +{{ include "kubernetes-ingress-controller.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/part-of: {{ template "kubernetes-ingress-controller.name" . 
}} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.commonLabels}} +{{ toYaml .Values.commonLabels }} +{{- end }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "kubernetes-ingress-controller.selectorLabels" -}} +app.kubernetes.io/name: {{ include "kubernetes-ingress-controller.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create the name of the controller service account to use +*/}} +{{- define "kubernetes-ingress-controller.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "kubernetes-ingress-controller.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the ngrok/ingress-controller image name +*/}} +{{- define "kubernetes-ingress-controller.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | default .Chart.AppVersion | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/controller-cm.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/controller-cm.yaml new file mode 100644 index 000000000..88728341c --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/controller-cm.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kubernetes-ingress-controller.fullname" . }}-manager-config + namespace: {{ .Release.Namespace }} +data: + controller_manager_config.yaml: | + apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 + kind: ControllerManagerConfig + health: + healthProbeBindAddress: :8081 + metrics: + bindAddress: 127.0.0.1:8080 + leaderElection: + leaderElect: true + resourceName: {{ include "kubernetes-ingress-controller.fullname" . 
}}-leader diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/controller-deployment.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/controller-deployment.yaml new file mode 100644 index 000000000..e29dbcc8f --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/controller-deployment.yaml @@ -0,0 +1,127 @@ +{{- $component := "controller" }} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + {{- include "kubernetes-ingress-controller.labels" . | nindent 4 }} + app.kubernetes.io/component: {{ $component }} + name: {{ include "kubernetes-ingress-controller.fullname" . }}-manager + namespace: {{ .Release.Namespace }} + annotations: + checksum/controller-role: {{ include (print $.Template.BasePath "/rbac/role.yaml") . | sha256sum }} + checksum/rbac: {{ include (print $.Template.BasePath "/controller-rbac.yaml") . | sha256sum }} +spec: + replicas: {{.Values.replicaCount}} + selector: + matchLabels: + {{- include "kubernetes-ingress-controller.selectorLabels" . | nindent 6 }} + {{- if .Values.podLabels }} + {{- toYaml .Values.podLabels | nindent 6 }} + {{- end }} + app.kubernetes.io/component: {{ $component }} + template: + metadata: + annotations: + {{- if .Values.podAnnotations }} + {{- toYaml .Values.podAnnotations | nindent 8 }} + {{- end }} + prometheus.io/path: /metrics + prometheus.io/port: '8080' + prometheus.io/scrape: 'true' + labels: + {{- include "kubernetes-ingress-controller.selectorLabels" . 
| nindent 8 }} + {{- if .Values.podLabels }} + {{- toYaml .Values.podLabels | nindent 8 }} + {{- end }} + app.kubernetes.io/component: {{ $component }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName | quote }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" $component "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" $component "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + serviceAccountName: {{ template "kubernetes-ingress-controller.serviceAccountName" . }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- toYaml .Values.image.pullSecrets | nindent 8 }} + {{- end }} + containers: + - name: ngrok-ingress-controller + image: {{ include "kubernetes-ingress-controller.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy }} + command: + - /manager + args: + {{- if .Values.region }} + - --region={{ .Values.region}} + {{- end }} + {{- if .Values.serverAddr }} + - --server-addr={{ .Values.serverAddr}} + {{- end }} + {{- if .Values.metaData }} + - --metadata={{- $metadataArgs := list -}} + {{- range $key, $value := .Values.metaData }} + {{- $metadataArgs = append $metadataArgs (printf "%s=%s" $key $value) -}} + {{- end }} + {{- $metadataArgs | join "," }} + {{- end }} + - --controller-name={{ .Values.controllerName }} + {{- if .Values.watchNamespace }} + - --watch-namespace={{ .Values.watchNamespace}} + {{- end }} + - --zap-log-level={{ .Values.log.level }} + - --zap-stacktrace-level={{ .Values.log.stacktraceLevel }} + - --zap-encoder={{ .Values.log.format }} + - --health-probe-bind-address=:8081 + - --metrics-bind-address=:8080 + - --election-id={{ include "kubernetes-ingress-controller.fullname" . }}-leader + - --manager-name={{ include "kubernetes-ingress-controller.fullname" . }}-manager + securityContext: + allowPrivilegeEscalation: false + env: + - name: NGROK_API_KEY + valueFrom: + secretKeyRef: + key: API_KEY + name: {{ include "kubernetes-ingress-controller.credentialsSecretName" . }} + - name: NGROK_AUTHTOKEN + valueFrom: + secretKeyRef: + key: AUTHTOKEN + name: {{ include "kubernetes-ingress-controller.credentialsSecretName" . 
}} + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- range $key, $value := .Values.extraEnv }} + - name: {{ $key }} + value: {{- toYaml $value | nindent 12 }} + {{- end }} + {{- if .Values.extraVolumeMounts }} + volumeMounts: + {{ toYaml .Values.extraVolumeMounts | nindent 10 }} + {{- end }} + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: + {{- toYaml .Values.resources | nindent 10 }} + {{- if .Values.extraVolumes }} + volumes: + {{ toYaml .Values.extraVolumes | nindent 6 }} + {{- end }} diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/controller-pdb.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/controller-pdb.yaml new file mode 100644 index 000000000..7046631ac --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/controller-pdb.yaml @@ -0,0 +1,26 @@ +{{- if .Values.podDisruptionBudget.create }} +{{ $component := "controller"}} +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: {{ include "kubernetes-ingress-controller.fullname" . }}-controller-pdb + namespace: {{ .Release.Namespace | quote }} + labels: + {{- include "kubernetes-ingress-controller.labels" . | nindent 4 }} + app.kubernetes.io/component: {{ $component }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + {{- include "kubernetes-ingress-controller.selectorLabels" . 
| nindent 6 }} + {{- if .Values.podLabels }} + {{- toYaml .Values.podLabels | nindent 6 }} + {{- end }} + app.kubernetes.io/component: {{ $component }} +{{- end }} diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/controller-rbac.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/controller-rbac.yaml new file mode 100644 index 000000000..82fade5c7 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/controller-rbac.yaml @@ -0,0 +1,96 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: ngrok-ingress-controller-leader-election-role + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: ngrok-ingress-controller-proxy-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ngrok-ingress-controller-leader-election-rolebinding + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ngrok-ingress-controller-leader-election-role +subjects: +- kind: ServiceAccount + name: {{ template "kubernetes-ingress-controller.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ngrok-ingress-controller-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ngrok-ingress-controller-manager-role +subjects: +- kind: ServiceAccount + name: {{ template "kubernetes-ingress-controller.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: ngrok-ingress-controller-proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ngrok-ingress-controller-proxy-role +subjects: +- kind: ServiceAccount + name: {{ template "kubernetes-ingress-controller.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/controller-serviceaccount.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/controller-serviceaccount.yaml new file mode 100644 index 000000000..d80a5d8c9 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/controller-serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create -}} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kubernetes-ingress-controller.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kubernetes-ingress-controller.labels" . 
| nindent 4 }} + app.kubernetes.io/component: controller + {{- if .Values.serviceAccount.annotations }} + annotations: + {{- toYaml .Values.serviceAccount.annotations | nindent 4 }} + {{- end }} +{{- end }} diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/crds/ingress.k8s.ngrok.com_domains.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/crds/ingress.k8s.ngrok.com_domains.yaml new file mode 100644 index 000000000..9ddf22a23 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/crds/ingress.k8s.ngrok.com_domains.yaml @@ -0,0 +1,101 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: domains.ingress.k8s.ngrok.com +spec: + group: ingress.k8s.ngrok.com + names: + kind: Domain + listKind: DomainList + plural: domains + singular: domain + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Domain ID + jsonPath: .status.id + name: ID + type: string + - description: Region + jsonPath: .status.region + name: Region + type: string + - description: Domain + jsonPath: .status.domain + name: Domain + type: string + - description: CNAME Target + jsonPath: .status.cnameTarget + name: CNAME Target + type: string + - description: Age + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Domain is the Schema for the domains API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: DomainSpec defines the desired state of Domain + properties: + description: + default: Created by kubernetes-ingress-controller + description: Description is a human-readable description of the object + in the ngrok API/Dashboard + type: string + domain: + description: Domain is the domain name to reserve + type: string + metadata: + default: '{"owned-by":"kubernetes-ingress-controller"}' + description: Metadata is a string of arbitrary data associated with + the object in the ngrok API/Dashboard + type: string + region: + description: Region is the region in which to reserve the domain + type: string + required: + - domain + type: object + status: + description: DomainStatus defines the observed state of Domain + properties: + cnameTarget: + description: CNAMETarget is the CNAME target for the domain + type: string + domain: + description: Domain is the domain that was reserved + type: string + id: + description: ID is the unique identifier of the domain + type: string + region: + description: Region is the region in which the domain was created + type: string + uri: + description: URI of the reserved domain API resource + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/crds/ingress.k8s.ngrok.com_httpsedges.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/crds/ingress.k8s.ngrok.com_httpsedges.yaml new file mode 100644 index 000000000..49149eb6a --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/crds/ingress.k8s.ngrok.com_httpsedges.yaml @@ -0,0 +1,1042 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition 
+metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: httpsedges.ingress.k8s.ngrok.com +spec: + group: ingress.k8s.ngrok.com + names: + kind: HTTPSEdge + listKind: HTTPSEdgeList + plural: httpsedges + singular: httpsedge + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: HTTPSEdge is the Schema for the httpsedges API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HTTPSEdgeSpec defines the desired state of HTTPSEdge + properties: + description: + default: Created by kubernetes-ingress-controller + description: Description is a human-readable description of the object + in the ngrok API/Dashboard + type: string + hostports: + description: Hostports is a list of hostports served by this edge + items: + type: string + type: array + metadata: + default: '{"owned-by":"kubernetes-ingress-controller"}' + description: Metadata is a string of arbitrary data associated with + the object in the ngrok API/Dashboard + type: string + routes: + description: Routes is a list of routes served by this edge + items: + properties: + backend: + description: Backend is the definition for the tunnel group + backend that serves traffic for this edge + properties: + description: + default: Created by 
kubernetes-ingress-controller + description: Description is a human-readable description + of the object in the ngrok API/Dashboard + type: string + labels: + additionalProperties: + type: string + description: Labels to watch for tunnels on this backend + type: object + metadata: + default: '{"owned-by":"kubernetes-ingress-controller"}' + description: Metadata is a string of arbitrary data associated + with the object in the ngrok API/Dashboard + type: string + type: object + circuitBreaker: + description: CircuitBreaker is a circuit breaker configuration + to apply to this route + properties: + errorThresholdPercentage: + anyOf: + - type: integer + - type: string + description: Error threshold percentage should be between + 0 - 1.0, not 0-100.0 + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + numBuckets: + description: Integer number of buckets into which metrics + are retained. Max 128. + format: int32 + maximum: 128 + minimum: 1 + type: integer + rollingWindow: + description: Statistical rolling window duration that metrics + are retained for. + format: duration + type: string + trippedDuration: + description: Duration after which the circuit is tripped + to wait before re-evaluating upstream health + format: duration + type: string + volumeThreshold: + description: Integer number of requests in a rolling window + that will trip the circuit. Helpful if traffic volume + is low. 
+ format: int32 + type: integer + type: object + compression: + description: Compression is whether or not to enable compression + for this route + properties: + enabled: + description: Enabled is whether or not to enable compression + for this endpoint + type: boolean + type: object + description: + default: Created by kubernetes-ingress-controller + description: Description is a human-readable description of + the object in the ngrok API/Dashboard + type: string + headers: + description: Headers are request/response headers to apply to + this route + properties: + request: + description: Request headers are the request headers module + configuration or null + properties: + add: + additionalProperties: + type: string + description: a map of header key to header value that + will be injected into the HTTP Request before being + sent to the upstream application server + type: object + remove: + description: a list of header names that will be removed + from the HTTP Request before being sent to the upstream + application server + items: + type: string + type: array + type: object + response: + description: Response headers are the response headers module + configuration or null + properties: + add: + additionalProperties: + type: string + description: a map of header key to header value that + will be injected into the HTTP Response returned to + the HTTP client + type: object + remove: + description: a list of header names that will be removed + from the HTTP Response returned to the HTTP client + items: + type: string + type: array + type: object + type: object + ipRestriction: + description: IPRestriction is an IPRestriction to apply to this + route + properties: + policies: + items: + type: string + type: array + type: object + match: + description: Match is the value to match against the request + path + type: string + matchType: + description: 'MatchType is the type of match to use for this + route. 
Valid values are:' + enum: + - exact_path + - path_prefix + type: string + metadata: + default: '{"owned-by":"kubernetes-ingress-controller"}' + description: Metadata is a string of arbitrary data associated + with the object in the ngrok API/Dashboard + type: string + oauth: + description: OAuth configuration to apply to this route + properties: + amazon: + description: configuration for using amazon as the identity + provider + properties: + authCheckInterval: + description: Duration after which ngrok guarantees it + will refresh user state from the identity provider + and recheck whether the user is still authorized to + access the endpoint. This is the preferred tunable + to use to enforce a minimum amount of time after which + a revoked user will no longer be able to access the + resource. + format: duration + type: string + clientId: + description: the OAuth app client ID. retrieve it from + the identity provider's dashboard where you created + your own OAuth app. optional. if unspecified, ngrok + will use its own managed oauth application which has + additional restrictions. see the OAuth module docs + for more details. if present, clientSecret must be + present as well. + type: string + clientSecret: + description: the OAuth app client secret. retrieve if + from the identity provider's dashboard where you created + your own OAuth app. optional, see all of the caveats + in the docs for clientId. + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + cookiePrefix: + description: the prefix of the session cookie that ngrok + sets on the http client to cache authentication. default + is 'ngrok.' 
+ type: string + emailAddresses: + description: a list of email addresses of users authenticated + by identity provider who are allowed access to the + endpoint + items: + type: string + type: array + emailDomains: + description: a list of email domains of users authenticated + by identity provider who are allowed access to the + endpoint + items: + type: string + type: array + inactivityTimeout: + description: Duration of inactivity after which if the + user has not accessed the endpoint, their session + will time out and they will be forced to reauthenticate. + format: duration + type: string + maximumDuration: + description: Integer number of seconds of the maximum + duration of an authenticated session. After this period + is exceeded, a user must reauthenticate. + format: duration + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS + requests. necessary if you are supporting CORS. + type: boolean + scopes: + description: a list of provider-specific OAuth scopes + with the permissions your OAuth app would like to + ask for. these may not be set if you are using the + ngrok-managed oauth app (i.e. you must pass both client_id + and client_secret to set scopes) + items: + type: string + type: array + type: object + facebook: + description: configuration for using facebook as the identity + provider + properties: + authCheckInterval: + description: Duration after which ngrok guarantees it + will refresh user state from the identity provider + and recheck whether the user is still authorized to + access the endpoint. This is the preferred tunable + to use to enforce a minimum amount of time after which + a revoked user will no longer be able to access the + resource. + format: duration + type: string + clientId: + description: the OAuth app client ID. retrieve it from + the identity provider's dashboard where you created + your own OAuth app. optional. 
if unspecified, ngrok + will use its own managed oauth application which has + additional restrictions. see the OAuth module docs + for more details. if present, clientSecret must be + present as well. + type: string + clientSecret: + description: the OAuth app client secret. retrieve if + from the identity provider's dashboard where you created + your own OAuth app. optional, see all of the caveats + in the docs for clientId. + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + cookiePrefix: + description: the prefix of the session cookie that ngrok + sets on the http client to cache authentication. default + is 'ngrok.' + type: string + emailAddresses: + description: a list of email addresses of users authenticated + by identity provider who are allowed access to the + endpoint + items: + type: string + type: array + emailDomains: + description: a list of email domains of users authenticated + by identity provider who are allowed access to the + endpoint + items: + type: string + type: array + inactivityTimeout: + description: Duration of inactivity after which if the + user has not accessed the endpoint, their session + will time out and they will be forced to reauthenticate. + format: duration + type: string + maximumDuration: + description: Integer number of seconds of the maximum + duration of an authenticated session. After this period + is exceeded, a user must reauthenticate. + format: duration + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS + requests. necessary if you are supporting CORS. + type: boolean + scopes: + description: a list of provider-specific OAuth scopes + with the permissions your OAuth app would like to + ask for. these may not be set if you are using the + ngrok-managed oauth app (i.e. 
you must pass both client_id + and client_secret to set scopes) + items: + type: string + type: array + type: object + github: + description: configuration for using github as the identity + provider + properties: + authCheckInterval: + description: Duration after which ngrok guarantees it + will refresh user state from the identity provider + and recheck whether the user is still authorized to + access the endpoint. This is the preferred tunable + to use to enforce a minimum amount of time after which + a revoked user will no longer be able to access the + resource. + format: duration + type: string + clientId: + description: the OAuth app client ID. retrieve it from + the identity provider's dashboard where you created + your own OAuth app. optional. if unspecified, ngrok + will use its own managed oauth application which has + additional restrictions. see the OAuth module docs + for more details. if present, clientSecret must be + present as well. + type: string + clientSecret: + description: the OAuth app client secret. retrieve if + from the identity provider's dashboard where you created + your own OAuth app. optional, see all of the caveats + in the docs for clientId. + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + cookiePrefix: + description: the prefix of the session cookie that ngrok + sets on the http client to cache authentication. default + is 'ngrok.' 
+ type: string + emailAddresses: + description: a list of email addresses of users authenticated + by identity provider who are allowed access to the + endpoint + items: + type: string + type: array + emailDomains: + description: a list of email domains of users authenticated + by identity provider who are allowed access to the + endpoint + items: + type: string + type: array + inactivityTimeout: + description: Duration of inactivity after which if the + user has not accessed the endpoint, their session + will time out and they will be forced to reauthenticate. + format: duration + type: string + maximumDuration: + description: Integer number of seconds of the maximum + duration of an authenticated session. After this period + is exceeded, a user must reauthenticate. + format: duration + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS + requests. necessary if you are supporting CORS. + type: boolean + organizations: + description: a list of github org identifiers. users + who are members of any of the listed organizations + will be allowed access. identifiers should be the + organization's 'slug' + items: + type: string + type: array + scopes: + description: a list of provider-specific OAuth scopes + with the permissions your OAuth app would like to + ask for. these may not be set if you are using the + ngrok-managed oauth app (i.e. you must pass both client_id + and client_secret to set scopes) + items: + type: string + type: array + teams: + description: a list of github teams identifiers. users + will be allowed access to the endpoint if they are + a member of any of these teams. identifiers should + be in the 'slug' format qualified with the org name, + e.g. 
org-name/team-name + items: + type: string + type: array + type: object + gitlab: + description: configuration for using gitlab as the identity + provider + properties: + authCheckInterval: + description: Duration after which ngrok guarantees it + will refresh user state from the identity provider + and recheck whether the user is still authorized to + access the endpoint. This is the preferred tunable + to use to enforce a minimum amount of time after which + a revoked user will no longer be able to access the + resource. + format: duration + type: string + clientId: + description: the OAuth app client ID. retrieve it from + the identity provider's dashboard where you created + your own OAuth app. optional. if unspecified, ngrok + will use its own managed oauth application which has + additional restrictions. see the OAuth module docs + for more details. if present, clientSecret must be + present as well. + type: string + clientSecret: + description: the OAuth app client secret. retrieve if + from the identity provider's dashboard where you created + your own OAuth app. optional, see all of the caveats + in the docs for clientId. + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + cookiePrefix: + description: the prefix of the session cookie that ngrok + sets on the http client to cache authentication. default + is 'ngrok.' 
+ type: string + emailAddresses: + description: a list of email addresses of users authenticated + by identity provider who are allowed access to the + endpoint + items: + type: string + type: array + emailDomains: + description: a list of email domains of users authenticated + by identity provider who are allowed access to the + endpoint + items: + type: string + type: array + inactivityTimeout: + description: Duration of inactivity after which if the + user has not accessed the endpoint, their session + will time out and they will be forced to reauthenticate. + format: duration + type: string + maximumDuration: + description: Integer number of seconds of the maximum + duration of an authenticated session. After this period + is exceeded, a user must reauthenticate. + format: duration + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS + requests. necessary if you are supporting CORS. + type: boolean + scopes: + description: a list of provider-specific OAuth scopes + with the permissions your OAuth app would like to + ask for. these may not be set if you are using the + ngrok-managed oauth app (i.e. you must pass both client_id + and client_secret to set scopes) + items: + type: string + type: array + type: object + google: + description: configuration for using google as the identity + provider + properties: + authCheckInterval: + description: Duration after which ngrok guarantees it + will refresh user state from the identity provider + and recheck whether the user is still authorized to + access the endpoint. This is the preferred tunable + to use to enforce a minimum amount of time after which + a revoked user will no longer be able to access the + resource. + format: duration + type: string + clientId: + description: the OAuth app client ID. retrieve it from + the identity provider's dashboard where you created + your own OAuth app. optional. 
if unspecified, ngrok + will use its own managed oauth application which has + additional restrictions. see the OAuth module docs + for more details. if present, clientSecret must be + present as well. + type: string + clientSecret: + description: the OAuth app client secret. retrieve if + from the identity provider's dashboard where you created + your own OAuth app. optional, see all of the caveats + in the docs for clientId. + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + cookiePrefix: + description: the prefix of the session cookie that ngrok + sets on the http client to cache authentication. default + is 'ngrok.' + type: string + emailAddresses: + description: a list of email addresses of users authenticated + by identity provider who are allowed access to the + endpoint + items: + type: string + type: array + emailDomains: + description: a list of email domains of users authenticated + by identity provider who are allowed access to the + endpoint + items: + type: string + type: array + inactivityTimeout: + description: Duration of inactivity after which if the + user has not accessed the endpoint, their session + will time out and they will be forced to reauthenticate. + format: duration + type: string + maximumDuration: + description: Integer number of seconds of the maximum + duration of an authenticated session. After this period + is exceeded, a user must reauthenticate. + format: duration + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS + requests. necessary if you are supporting CORS. + type: boolean + scopes: + description: a list of provider-specific OAuth scopes + with the permissions your OAuth app would like to + ask for. these may not be set if you are using the + ngrok-managed oauth app (i.e. 
you must pass both client_id + and client_secret to set scopes) + items: + type: string + type: array + type: object + linkedin: + description: configuration for using linkedin as the identity + provider + properties: + authCheckInterval: + description: Duration after which ngrok guarantees it + will refresh user state from the identity provider + and recheck whether the user is still authorized to + access the endpoint. This is the preferred tunable + to use to enforce a minimum amount of time after which + a revoked user will no longer be able to access the + resource. + format: duration + type: string + clientId: + description: the OAuth app client ID. retrieve it from + the identity provider's dashboard where you created + your own OAuth app. optional. if unspecified, ngrok + will use its own managed oauth application which has + additional restrictions. see the OAuth module docs + for more details. if present, clientSecret must be + present as well. + type: string + clientSecret: + description: the OAuth app client secret. retrieve if + from the identity provider's dashboard where you created + your own OAuth app. optional, see all of the caveats + in the docs for clientId. + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + cookiePrefix: + description: the prefix of the session cookie that ngrok + sets on the http client to cache authentication. default + is 'ngrok.' 
+ type: string + emailAddresses: + description: a list of email addresses of users authenticated + by identity provider who are allowed access to the + endpoint + items: + type: string + type: array + emailDomains: + description: a list of email domains of users authenticated + by identity provider who are allowed access to the + endpoint + items: + type: string + type: array + inactivityTimeout: + description: Duration of inactivity after which if the + user has not accessed the endpoint, their session + will time out and they will be forced to reauthenticate. + format: duration + type: string + maximumDuration: + description: Integer number of seconds of the maximum + duration of an authenticated session. After this period + is exceeded, a user must reauthenticate. + format: duration + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS + requests. necessary if you are supporting CORS. + type: boolean + scopes: + description: a list of provider-specific OAuth scopes + with the permissions your OAuth app would like to + ask for. these may not be set if you are using the + ngrok-managed oauth app (i.e. you must pass both client_id + and client_secret to set scopes) + items: + type: string + type: array + type: object + microsoft: + description: configuration for using microsoft as the identity + provider + properties: + authCheckInterval: + description: Duration after which ngrok guarantees it + will refresh user state from the identity provider + and recheck whether the user is still authorized to + access the endpoint. This is the preferred tunable + to use to enforce a minimum amount of time after which + a revoked user will no longer be able to access the + resource. + format: duration + type: string + clientId: + description: the OAuth app client ID. retrieve it from + the identity provider's dashboard where you created + your own OAuth app. optional. 
if unspecified, ngrok + will use its own managed oauth application which has + additional restrictions. see the OAuth module docs + for more details. if present, clientSecret must be + present as well. + type: string + clientSecret: + description: the OAuth app client secret. retrieve if + from the identity provider's dashboard where you created + your own OAuth app. optional, see all of the caveats + in the docs for clientId. + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + cookiePrefix: + description: the prefix of the session cookie that ngrok + sets on the http client to cache authentication. default + is 'ngrok.' + type: string + emailAddresses: + description: a list of email addresses of users authenticated + by identity provider who are allowed access to the + endpoint + items: + type: string + type: array + emailDomains: + description: a list of email domains of users authenticated + by identity provider who are allowed access to the + endpoint + items: + type: string + type: array + inactivityTimeout: + description: Duration of inactivity after which if the + user has not accessed the endpoint, their session + will time out and they will be forced to reauthenticate. + format: duration + type: string + maximumDuration: + description: Integer number of seconds of the maximum + duration of an authenticated session. After this period + is exceeded, a user must reauthenticate. + format: duration + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS + requests. necessary if you are supporting CORS. + type: boolean + scopes: + description: a list of provider-specific OAuth scopes + with the permissions your OAuth app would like to + ask for. these may not be set if you are using the + ngrok-managed oauth app (i.e. 
you must pass both client_id + and client_secret to set scopes) + items: + type: string + type: array + type: object + twitch: + description: configuration for using twitch as the identity + provider + properties: + authCheckInterval: + description: Duration after which ngrok guarantees it + will refresh user state from the identity provider + and recheck whether the user is still authorized to + access the endpoint. This is the preferred tunable + to use to enforce a minimum amount of time after which + a revoked user will no longer be able to access the + resource. + format: duration + type: string + clientId: + description: the OAuth app client ID. retrieve it from + the identity provider's dashboard where you created + your own OAuth app. optional. if unspecified, ngrok + will use its own managed oauth application which has + additional restrictions. see the OAuth module docs + for more details. if present, clientSecret must be + present as well. + type: string + clientSecret: + description: the OAuth app client secret. retrieve if + from the identity provider's dashboard where you created + your own OAuth app. optional, see all of the caveats + in the docs for clientId. + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + cookiePrefix: + description: the prefix of the session cookie that ngrok + sets on the http client to cache authentication. default + is 'ngrok.' 
+ type: string + emailAddresses: + description: a list of email addresses of users authenticated + by identity provider who are allowed access to the + endpoint + items: + type: string + type: array + emailDomains: + description: a list of email domains of users authenticated + by identity provider who are allowed access to the + endpoint + items: + type: string + type: array + inactivityTimeout: + description: Duration of inactivity after which if the + user has not accessed the endpoint, their session + will time out and they will be forced to reauthenticate. + format: duration + type: string + maximumDuration: + description: Integer number of seconds of the maximum + duration of an authenticated session. After this period + is exceeded, a user must reauthenticate. + format: duration + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS + requests. necessary if you are supporting CORS. + type: boolean + scopes: + description: a list of provider-specific OAuth scopes + with the permissions your OAuth app would like to + ask for. these may not be set if you are using the + ngrok-managed oauth app (i.e. you must pass both client_id + and client_secret to set scopes) + items: + type: string + type: array + type: object + type: object + oidc: + description: OIDC is the OpenID Connect configuration to apply + to this route + properties: + clientId: + description: The OIDC app's client ID and OIDC audience. + type: string + clientSecret: + description: The OIDC app's client secret. + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + cookiePrefix: + description: the prefix of the session cookie that ngrok + sets on the http client to cache authentication. default + is 'ngrok.' 
+ type: string + inactivityTimeout: + description: Duration of inactivity after which if the user + has not accessed the endpoint, their session will time + out and they will be forced to reauthenticate. + format: duration + type: string + issuer: + description: URL of the OIDC "OpenID provider". This is + the base URL used for discovery. + type: string + maximumDuration: + description: The maximum duration of an authenticated session. + After this period is exceeded, a user must reauthenticate. + format: duration + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS + requests. necessary if you are supporting CORS. + type: boolean + scopes: + description: The set of scopes to request from the OIDC + identity provider. + items: + type: string + type: array + type: object + saml: + description: SAML is the SAML configuration to apply to this + route + properties: + allowIdpInitiated: + description: If true, the IdP may initiate a login directly + (e.g. the user does not need to visit the endpoint first + and then be redirected). The IdP should set the RelayState + parameter to the target URL of the resource they want + the user to be redirected to after the SAML login assertion + has been processed. + type: boolean + authorizedGroups: + description: If present, only users who are a member of + one of the listed groups may access the target endpoint. + items: + type: string + type: array + cookiePrefix: + description: the prefix of the session cookie that ngrok + sets on the http client to cache authentication. default + is 'ngrok.' + type: string + forceAuthn: + description: If true, indicates that whenever we redirect + a user to the IdP for authentication that the IdP must + prompt the user for authentication credentials even if + the user already has a valid session with the IdP. + type: boolean + idpMetadata: + description: The full XML IdP EntityDescriptor. 
Your IdP + may provide this to you as a a file to download or as + a URL. + type: string + inactivityTimeout: + description: Duration of inactivity after which if the user + has not accessed the endpoint, their session will time + out and they will be forced to reauthenticate. + format: duration + type: string + maximumDuration: + description: The maximum duration of an authenticated session. + After this period is exceeded, a user must reauthenticate. + format: duration + type: string + nameidFormat: + description: Defines the name identifier format the SP expects + the IdP to use in its assertions to identify subjects. + If unspecified, a default value of urn:oasis:names:tc:SAML:2.0:nameid-format:persistent + will be used. A subset of the allowed values enumerated + by the SAML specification are supported. + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS + requests. necessary if you are supporting CORS. + type: boolean + type: object + webhookVerification: + description: WebhookVerification is webhook verification configuration + to apply to this route + properties: + provider: + description: a string indicating which webhook provider + will be sending webhooks to this endpoint. Value must + be one of the supported providers defined at https://ngrok.com/docs/cloud-edge#webhook-verification + type: string + secret: + description: SecretRef is a reference to a secret containing + the secret used to validate requests from the given provider. 
+ All providers except AWS SNS require a secret + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + type: object + required: + - match + - matchType + type: object + type: array + tlsTermination: + description: TLSTermination is the TLS termination configuration for + this edge + properties: + minVersion: + description: MinVersion is the minimum TLS version to allow for + connections to the edge + type: string + type: object + type: object + status: + description: HTTPSEdgeStatus defines the observed state of HTTPSEdge + properties: + id: + description: ID is the unique identifier for this edge + type: string + routes: + items: + properties: + backend: + description: Backend stores the status of the tunnel group backend, + mainly the ID of the backend + properties: + id: + description: ID is the unique identifier for this backend + type: string + type: object + id: + description: ID is the unique identifier for this route + type: string + match: + type: string + matchType: + type: string + uri: + description: URI is the URI for this route + type: string + type: object + type: array + uri: + description: URI is the URI for this edge + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/crds/ingress.k8s.ngrok.com_ippolicies.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/crds/ingress.k8s.ngrok.com_ippolicies.yaml new file mode 100644 index 000000000..a1a319fbb --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/crds/ingress.k8s.ngrok.com_ippolicies.yaml @@ -0,0 +1,105 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: ippolicies.ingress.k8s.ngrok.com +spec: + group: 
ingress.k8s.ngrok.com + names: + kind: IPPolicy + listKind: IPPolicyList + plural: ippolicies + singular: ippolicy + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: IPPolicy ID + jsonPath: .status.id + name: ID + type: string + - description: Age + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: IPPolicy is the Schema for the ippolicies API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: IPPolicySpec defines the desired state of IPPolicy + properties: + description: + default: Created by kubernetes-ingress-controller + description: Description is a human-readable description of the object + in the ngrok API/Dashboard + type: string + metadata: + default: '{"owned-by":"kubernetes-ingress-controller"}' + description: Metadata is a string of arbitrary data associated with + the object in the ngrok API/Dashboard + type: string + rules: + description: Rules is a list of rules that belong to the policy + items: + properties: + action: + enum: + - allow + - deny + type: string + cidr: + type: string + description: + default: Created by kubernetes-ingress-controller + description: Description is a human-readable description of + the object in the ngrok API/Dashboard + type: string + metadata: + default: '{"owned-by":"kubernetes-ingress-controller"}' + description: Metadata is a string of arbitrary data associated + with the object in the ngrok API/Dashboard + type: string + type: object + type: array + type: object + status: + description: IPPolicyStatus defines the observed state of IPPolicy + properties: + id: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + type: string + rules: + items: + properties: + action: + type: string + cidr: + type: string + id: + type: string + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/crds/ingress.k8s.ngrok.com_ngrokmodulesets.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/crds/ingress.k8s.ngrok.com_ngrokmodulesets.yaml new file mode 100644 index 000000000..8521464f3 --- /dev/null +++ 
b/charts/ngrok/kubernetes-ingress-controller/templates/crds/ingress.k8s.ngrok.com_ngrokmodulesets.yaml @@ -0,0 +1,883 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: ngrokmodulesets.ingress.k8s.ngrok.com +spec: + group: ingress.k8s.ngrok.com + names: + kind: NgrokModuleSet + listKind: NgrokModuleSetList + plural: ngrokmodulesets + singular: ngrokmoduleset + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: NgrokModuleSet is the Schema for the ngrokmodules API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + modules: + properties: + circuitBreaker: + description: CircuitBreaker configuration for this module set + properties: + errorThresholdPercentage: + anyOf: + - type: integer + - type: string + description: Error threshold percentage should be between 0 - + 1.0, not 0-100.0 + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + numBuckets: + description: Integer number of buckets into which metrics are + retained. Max 128. 
+ format: int32 + maximum: 128 + minimum: 1 + type: integer + rollingWindow: + description: Statistical rolling window duration that metrics + are retained for. + format: duration + type: string + trippedDuration: + description: Duration after which the circuit is tripped to wait + before re-evaluating upstream health + format: duration + type: string + volumeThreshold: + description: Integer number of requests in a rolling window that + will trip the circuit. Helpful if traffic volume is low. + format: int32 + type: integer + type: object + compression: + description: Compression configuration for this module set + properties: + enabled: + description: Enabled is whether or not to enable compression for + this endpoint + type: boolean + type: object + headers: + description: Header configuration for this module set + properties: + request: + description: Request headers are the request headers module configuration + or null + properties: + add: + additionalProperties: + type: string + description: a map of header key to header value that will + be injected into the HTTP Request before being sent to the + upstream application server + type: object + remove: + description: a list of header names that will be removed from + the HTTP Request before being sent to the upstream application + server + items: + type: string + type: array + type: object + response: + description: Response headers are the response headers module + configuration or null + properties: + add: + additionalProperties: + type: string + description: a map of header key to header value that will + be injected into the HTTP Response returned to the HTTP + client + type: object + remove: + description: a list of header names that will be removed from + the HTTP Response returned to the HTTP client + items: + type: string + type: array + type: object + type: object + ipRestriction: + description: IPRestriction configuration for this module set + properties: + policies: + items: + type: string + type: 
array + type: object + oauth: + description: OAuth configuration for this module set + properties: + amazon: + description: configuration for using amazon as the identity provider + properties: + authCheckInterval: + description: Duration after which ngrok guarantees it will + refresh user state from the identity provider and recheck + whether the user is still authorized to access the endpoint. + This is the preferred tunable to use to enforce a minimum + amount of time after which a revoked user will no longer + be able to access the resource. + format: duration + type: string + clientId: + description: the OAuth app client ID. retrieve it from the + identity provider's dashboard where you created your own + OAuth app. optional. if unspecified, ngrok will use its + own managed oauth application which has additional restrictions. + see the OAuth module docs for more details. if present, + clientSecret must be present as well. + type: string + clientSecret: + description: the OAuth app client secret. retrieve if from + the identity provider's dashboard where you created your + own OAuth app. optional, see all of the caveats in the docs + for clientId. + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + cookiePrefix: + description: the prefix of the session cookie that ngrok sets + on the http client to cache authentication. default is 'ngrok.' 
+ type: string + emailAddresses: + description: a list of email addresses of users authenticated + by identity provider who are allowed access to the endpoint + items: + type: string + type: array + emailDomains: + description: a list of email domains of users authenticated + by identity provider who are allowed access to the endpoint + items: + type: string + type: array + inactivityTimeout: + description: Duration of inactivity after which if the user + has not accessed the endpoint, their session will time out + and they will be forced to reauthenticate. + format: duration + type: string + maximumDuration: + description: Integer number of seconds of the maximum duration + of an authenticated session. After this period is exceeded, + a user must reauthenticate. + format: duration + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS + requests. necessary if you are supporting CORS. + type: boolean + scopes: + description: a list of provider-specific OAuth scopes with + the permissions your OAuth app would like to ask for. these + may not be set if you are using the ngrok-managed oauth + app (i.e. you must pass both client_id and client_secret + to set scopes) + items: + type: string + type: array + type: object + facebook: + description: configuration for using facebook as the identity + provider + properties: + authCheckInterval: + description: Duration after which ngrok guarantees it will + refresh user state from the identity provider and recheck + whether the user is still authorized to access the endpoint. + This is the preferred tunable to use to enforce a minimum + amount of time after which a revoked user will no longer + be able to access the resource. + format: duration + type: string + clientId: + description: the OAuth app client ID. retrieve it from the + identity provider's dashboard where you created your own + OAuth app. optional. 
if unspecified, ngrok will use its + own managed oauth application which has additional restrictions. + see the OAuth module docs for more details. if present, + clientSecret must be present as well. + type: string + clientSecret: + description: the OAuth app client secret. retrieve if from + the identity provider's dashboard where you created your + own OAuth app. optional, see all of the caveats in the docs + for clientId. + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + cookiePrefix: + description: the prefix of the session cookie that ngrok sets + on the http client to cache authentication. default is 'ngrok.' + type: string + emailAddresses: + description: a list of email addresses of users authenticated + by identity provider who are allowed access to the endpoint + items: + type: string + type: array + emailDomains: + description: a list of email domains of users authenticated + by identity provider who are allowed access to the endpoint + items: + type: string + type: array + inactivityTimeout: + description: Duration of inactivity after which if the user + has not accessed the endpoint, their session will time out + and they will be forced to reauthenticate. + format: duration + type: string + maximumDuration: + description: Integer number of seconds of the maximum duration + of an authenticated session. After this period is exceeded, + a user must reauthenticate. + format: duration + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS + requests. necessary if you are supporting CORS. + type: boolean + scopes: + description: a list of provider-specific OAuth scopes with + the permissions your OAuth app would like to ask for. these + may not be set if you are using the ngrok-managed oauth + app (i.e. 
you must pass both client_id and client_secret + to set scopes) + items: + type: string + type: array + type: object + github: + description: configuration for using github as the identity provider + properties: + authCheckInterval: + description: Duration after which ngrok guarantees it will + refresh user state from the identity provider and recheck + whether the user is still authorized to access the endpoint. + This is the preferred tunable to use to enforce a minimum + amount of time after which a revoked user will no longer + be able to access the resource. + format: duration + type: string + clientId: + description: the OAuth app client ID. retrieve it from the + identity provider's dashboard where you created your own + OAuth app. optional. if unspecified, ngrok will use its + own managed oauth application which has additional restrictions. + see the OAuth module docs for more details. if present, + clientSecret must be present as well. + type: string + clientSecret: + description: the OAuth app client secret. retrieve if from + the identity provider's dashboard where you created your + own OAuth app. optional, see all of the caveats in the docs + for clientId. + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + cookiePrefix: + description: the prefix of the session cookie that ngrok sets + on the http client to cache authentication. default is 'ngrok.' 
+ type: string + emailAddresses: + description: a list of email addresses of users authenticated + by identity provider who are allowed access to the endpoint + items: + type: string + type: array + emailDomains: + description: a list of email domains of users authenticated + by identity provider who are allowed access to the endpoint + items: + type: string + type: array + inactivityTimeout: + description: Duration of inactivity after which if the user + has not accessed the endpoint, their session will time out + and they will be forced to reauthenticate. + format: duration + type: string + maximumDuration: + description: Integer number of seconds of the maximum duration + of an authenticated session. After this period is exceeded, + a user must reauthenticate. + format: duration + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS + requests. necessary if you are supporting CORS. + type: boolean + organizations: + description: a list of github org identifiers. users who are + members of any of the listed organizations will be allowed + access. identifiers should be the organization's 'slug' + items: + type: string + type: array + scopes: + description: a list of provider-specific OAuth scopes with + the permissions your OAuth app would like to ask for. these + may not be set if you are using the ngrok-managed oauth + app (i.e. you must pass both client_id and client_secret + to set scopes) + items: + type: string + type: array + teams: + description: a list of github teams identifiers. users will + be allowed access to the endpoint if they are a member of + any of these teams. identifiers should be in the 'slug' + format qualified with the org name, e.g. 
org-name/team-name + items: + type: string + type: array + type: object + gitlab: + description: configuration for using gitlab as the identity provider + properties: + authCheckInterval: + description: Duration after which ngrok guarantees it will + refresh user state from the identity provider and recheck + whether the user is still authorized to access the endpoint. + This is the preferred tunable to use to enforce a minimum + amount of time after which a revoked user will no longer + be able to access the resource. + format: duration + type: string + clientId: + description: the OAuth app client ID. retrieve it from the + identity provider's dashboard where you created your own + OAuth app. optional. if unspecified, ngrok will use its + own managed oauth application which has additional restrictions. + see the OAuth module docs for more details. if present, + clientSecret must be present as well. + type: string + clientSecret: + description: the OAuth app client secret. retrieve if from + the identity provider's dashboard where you created your + own OAuth app. optional, see all of the caveats in the docs + for clientId. + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + cookiePrefix: + description: the prefix of the session cookie that ngrok sets + on the http client to cache authentication. default is 'ngrok.' 
+ type: string + emailAddresses: + description: a list of email addresses of users authenticated + by identity provider who are allowed access to the endpoint + items: + type: string + type: array + emailDomains: + description: a list of email domains of users authenticated + by identity provider who are allowed access to the endpoint + items: + type: string + type: array + inactivityTimeout: + description: Duration of inactivity after which if the user + has not accessed the endpoint, their session will time out + and they will be forced to reauthenticate. + format: duration + type: string + maximumDuration: + description: Integer number of seconds of the maximum duration + of an authenticated session. After this period is exceeded, + a user must reauthenticate. + format: duration + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS + requests. necessary if you are supporting CORS. + type: boolean + scopes: + description: a list of provider-specific OAuth scopes with + the permissions your OAuth app would like to ask for. these + may not be set if you are using the ngrok-managed oauth + app (i.e. you must pass both client_id and client_secret + to set scopes) + items: + type: string + type: array + type: object + google: + description: configuration for using google as the identity provider + properties: + authCheckInterval: + description: Duration after which ngrok guarantees it will + refresh user state from the identity provider and recheck + whether the user is still authorized to access the endpoint. + This is the preferred tunable to use to enforce a minimum + amount of time after which a revoked user will no longer + be able to access the resource. + format: duration + type: string + clientId: + description: the OAuth app client ID. retrieve it from the + identity provider's dashboard where you created your own + OAuth app. optional. 
if unspecified, ngrok will use its + own managed oauth application which has additional restrictions. + see the OAuth module docs for more details. if present, + clientSecret must be present as well. + type: string + clientSecret: + description: the OAuth app client secret. retrieve if from + the identity provider's dashboard where you created your + own OAuth app. optional, see all of the caveats in the docs + for clientId. + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + cookiePrefix: + description: the prefix of the session cookie that ngrok sets + on the http client to cache authentication. default is 'ngrok.' + type: string + emailAddresses: + description: a list of email addresses of users authenticated + by identity provider who are allowed access to the endpoint + items: + type: string + type: array + emailDomains: + description: a list of email domains of users authenticated + by identity provider who are allowed access to the endpoint + items: + type: string + type: array + inactivityTimeout: + description: Duration of inactivity after which if the user + has not accessed the endpoint, their session will time out + and they will be forced to reauthenticate. + format: duration + type: string + maximumDuration: + description: Integer number of seconds of the maximum duration + of an authenticated session. After this period is exceeded, + a user must reauthenticate. + format: duration + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS + requests. necessary if you are supporting CORS. + type: boolean + scopes: + description: a list of provider-specific OAuth scopes with + the permissions your OAuth app would like to ask for. these + may not be set if you are using the ngrok-managed oauth + app (i.e. 
you must pass both client_id and client_secret + to set scopes) + items: + type: string + type: array + type: object + linkedin: + description: configuration for using linkedin as the identity + provider + properties: + authCheckInterval: + description: Duration after which ngrok guarantees it will + refresh user state from the identity provider and recheck + whether the user is still authorized to access the endpoint. + This is the preferred tunable to use to enforce a minimum + amount of time after which a revoked user will no longer + be able to access the resource. + format: duration + type: string + clientId: + description: the OAuth app client ID. retrieve it from the + identity provider's dashboard where you created your own + OAuth app. optional. if unspecified, ngrok will use its + own managed oauth application which has additional restrictions. + see the OAuth module docs for more details. if present, + clientSecret must be present as well. + type: string + clientSecret: + description: the OAuth app client secret. retrieve if from + the identity provider's dashboard where you created your + own OAuth app. optional, see all of the caveats in the docs + for clientId. + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + cookiePrefix: + description: the prefix of the session cookie that ngrok sets + on the http client to cache authentication. default is 'ngrok.' 
+ type: string + emailAddresses: + description: a list of email addresses of users authenticated + by identity provider who are allowed access to the endpoint + items: + type: string + type: array + emailDomains: + description: a list of email domains of users authenticated + by identity provider who are allowed access to the endpoint + items: + type: string + type: array + inactivityTimeout: + description: Duration of inactivity after which if the user + has not accessed the endpoint, their session will time out + and they will be forced to reauthenticate. + format: duration + type: string + maximumDuration: + description: Integer number of seconds of the maximum duration + of an authenticated session. After this period is exceeded, + a user must reauthenticate. + format: duration + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS + requests. necessary if you are supporting CORS. + type: boolean + scopes: + description: a list of provider-specific OAuth scopes with + the permissions your OAuth app would like to ask for. these + may not be set if you are using the ngrok-managed oauth + app (i.e. you must pass both client_id and client_secret + to set scopes) + items: + type: string + type: array + type: object + microsoft: + description: configuration for using microsoft as the identity + provider + properties: + authCheckInterval: + description: Duration after which ngrok guarantees it will + refresh user state from the identity provider and recheck + whether the user is still authorized to access the endpoint. + This is the preferred tunable to use to enforce a minimum + amount of time after which a revoked user will no longer + be able to access the resource. + format: duration + type: string + clientId: + description: the OAuth app client ID. retrieve it from the + identity provider's dashboard where you created your own + OAuth app. optional. 
if unspecified, ngrok will use its + own managed oauth application which has additional restrictions. + see the OAuth module docs for more details. if present, + clientSecret must be present as well. + type: string + clientSecret: + description: the OAuth app client secret. retrieve if from + the identity provider's dashboard where you created your + own OAuth app. optional, see all of the caveats in the docs + for clientId. + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + cookiePrefix: + description: the prefix of the session cookie that ngrok sets + on the http client to cache authentication. default is 'ngrok.' + type: string + emailAddresses: + description: a list of email addresses of users authenticated + by identity provider who are allowed access to the endpoint + items: + type: string + type: array + emailDomains: + description: a list of email domains of users authenticated + by identity provider who are allowed access to the endpoint + items: + type: string + type: array + inactivityTimeout: + description: Duration of inactivity after which if the user + has not accessed the endpoint, their session will time out + and they will be forced to reauthenticate. + format: duration + type: string + maximumDuration: + description: Integer number of seconds of the maximum duration + of an authenticated session. After this period is exceeded, + a user must reauthenticate. + format: duration + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS + requests. necessary if you are supporting CORS. + type: boolean + scopes: + description: a list of provider-specific OAuth scopes with + the permissions your OAuth app would like to ask for. these + may not be set if you are using the ngrok-managed oauth + app (i.e. 
you must pass both client_id and client_secret + to set scopes) + items: + type: string + type: array + type: object + twitch: + description: configuration for using twitch as the identity provider + properties: + authCheckInterval: + description: Duration after which ngrok guarantees it will + refresh user state from the identity provider and recheck + whether the user is still authorized to access the endpoint. + This is the preferred tunable to use to enforce a minimum + amount of time after which a revoked user will no longer + be able to access the resource. + format: duration + type: string + clientId: + description: the OAuth app client ID. retrieve it from the + identity provider's dashboard where you created your own + OAuth app. optional. if unspecified, ngrok will use its + own managed oauth application which has additional restrictions. + see the OAuth module docs for more details. if present, + clientSecret must be present as well. + type: string + clientSecret: + description: the OAuth app client secret. retrieve if from + the identity provider's dashboard where you created your + own OAuth app. optional, see all of the caveats in the docs + for clientId. + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + cookiePrefix: + description: the prefix of the session cookie that ngrok sets + on the http client to cache authentication. default is 'ngrok.' 
+ type: string + emailAddresses: + description: a list of email addresses of users authenticated + by identity provider who are allowed access to the endpoint + items: + type: string + type: array + emailDomains: + description: a list of email domains of users authenticated + by identity provider who are allowed access to the endpoint + items: + type: string + type: array + inactivityTimeout: + description: Duration of inactivity after which if the user + has not accessed the endpoint, their session will time out + and they will be forced to reauthenticate. + format: duration + type: string + maximumDuration: + description: Integer number of seconds of the maximum duration + of an authenticated session. After this period is exceeded, + a user must reauthenticate. + format: duration + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS + requests. necessary if you are supporting CORS. + type: boolean + scopes: + description: a list of provider-specific OAuth scopes with + the permissions your OAuth app would like to ask for. these + may not be set if you are using the ngrok-managed oauth + app (i.e. you must pass both client_id and client_secret + to set scopes) + items: + type: string + type: array + type: object + type: object + oidc: + description: OIDC configuration for this module set + properties: + clientId: + description: The OIDC app's client ID and OIDC audience. + type: string + clientSecret: + description: The OIDC app's client secret. + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + cookiePrefix: + description: the prefix of the session cookie that ngrok sets + on the http client to cache authentication. default is 'ngrok.' 
+ type: string + inactivityTimeout: + description: Duration of inactivity after which if the user has + not accessed the endpoint, their session will time out and they + will be forced to reauthenticate. + format: duration + type: string + issuer: + description: URL of the OIDC "OpenID provider". This is the base + URL used for discovery. + type: string + maximumDuration: + description: The maximum duration of an authenticated session. + After this period is exceeded, a user must reauthenticate. + format: duration + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS requests. + necessary if you are supporting CORS. + type: boolean + scopes: + description: The set of scopes to request from the OIDC identity + provider. + items: + type: string + type: array + type: object + saml: + description: SAML configuration for this module set + properties: + allowIdpInitiated: + description: If true, the IdP may initiate a login directly (e.g. + the user does not need to visit the endpoint first and then + be redirected). The IdP should set the RelayState parameter + to the target URL of the resource they want the user to be redirected + to after the SAML login assertion has been processed. + type: boolean + authorizedGroups: + description: If present, only users who are a member of one of + the listed groups may access the target endpoint. + items: + type: string + type: array + cookiePrefix: + description: the prefix of the session cookie that ngrok sets + on the http client to cache authentication. default is 'ngrok.' + type: string + forceAuthn: + description: If true, indicates that whenever we redirect a user + to the IdP for authentication that the IdP must prompt the user + for authentication credentials even if the user already has + a valid session with the IdP. + type: boolean + idpMetadata: + description: The full XML IdP EntityDescriptor. Your IdP may provide + this to you as a a file to download or as a URL. 
+ type: string + inactivityTimeout: + description: Duration of inactivity after which if the user has + not accessed the endpoint, their session will time out and they + will be forced to reauthenticate. + format: duration + type: string + maximumDuration: + description: The maximum duration of an authenticated session. + After this period is exceeded, a user must reauthenticate. + format: duration + type: string + nameidFormat: + description: Defines the name identifier format the SP expects + the IdP to use in its assertions to identify subjects. If unspecified, + a default value of urn:oasis:names:tc:SAML:2.0:nameid-format:persistent + will be used. A subset of the allowed values enumerated by the + SAML specification are supported. + type: string + optionsPassthrough: + description: Do not enforce authentication on HTTP OPTIONS requests. + necessary if you are supporting CORS. + type: boolean + type: object + tlsTermination: + description: TLSTermination configuration for this module set + properties: + minVersion: + description: MinVersion is the minimum TLS version to allow for + connections to the edge + type: string + type: object + webhookVerification: + description: WebhookVerification configuration for this module set + properties: + provider: + description: a string indicating which webhook provider will be + sending webhooks to this endpoint. Value must be one of the + supported providers defined at https://ngrok.com/docs/cloud-edge#webhook-verification + type: string + secret: + description: SecretRef is a reference to a secret containing the + secret used to validate requests from the given provider. 
All + providers except AWS SNS require a secret + properties: + key: + description: Key in the secret to use + type: string + name: + description: Name of the Kubernetes secret + type: string + type: object + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/crds/ingress.k8s.ngrok.com_tcpedges.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/crds/ingress.k8s.ngrok.com_tcpedges.yaml new file mode 100644 index 000000000..ffb4af821 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/crds/ingress.k8s.ngrok.com_tcpedges.yaml @@ -0,0 +1,121 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: tcpedges.ingress.k8s.ngrok.com +spec: + group: ingress.k8s.ngrok.com + names: + kind: TCPEdge + listKind: TCPEdgeList + plural: tcpedges + singular: tcpedge + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Domain ID + jsonPath: .status.id + name: ID + type: string + - description: Hostports + jsonPath: .status.hostports + name: Hostports + type: string + - description: Tunnel Group Backend ID + jsonPath: .status.backend.id + name: Backend ID + type: string + - description: Age + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: TCPEdge is the Schema for the tcpedges API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TCPEdgeSpec defines the desired state of TCPEdge + properties: + backend: + description: Backend is the definition for the tunnel group backend + that serves traffic for this edge + properties: + description: + default: Created by kubernetes-ingress-controller + description: Description is a human-readable description of the + object in the ngrok API/Dashboard + type: string + labels: + additionalProperties: + type: string + description: Labels to watch for tunnels on this backend + type: object + metadata: + default: '{"owned-by":"kubernetes-ingress-controller"}' + description: Metadata is a string of arbitrary data associated + with the object in the ngrok API/Dashboard + type: string + type: object + description: + default: Created by kubernetes-ingress-controller + description: Description is a human-readable description of the object + in the ngrok API/Dashboard + type: string + ipRestriction: + description: IPRestriction is an IPRestriction to apply to this route + properties: + policies: + items: + type: string + type: array + type: object + metadata: + default: '{"owned-by":"kubernetes-ingress-controller"}' + description: Metadata is a string of arbitrary data associated with + the object in the ngrok API/Dashboard + type: string + type: object + status: + description: TCPEdgeStatus defines the observed state of TCPEdge + properties: + backend: + description: Backend stores the status of the tunnel group backend, + mainly the ID of the backend + properties: + 
id: + description: ID is the unique identifier for this backend + type: string + type: object + hostports: + description: Hostports served by this edge + items: + type: string + type: array + id: + description: ID is the unique identifier for this edge + type: string + uri: + description: URI is the URI of the edge + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/crds/ingress.k8s.ngrok.com_tunnels.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/crds/ingress.k8s.ngrok.com_tunnels.yaml new file mode 100644 index 000000000..f67724a32 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/crds/ingress.k8s.ngrok.com_tunnels.yaml @@ -0,0 +1,70 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.9.2 + creationTimestamp: null + name: tunnels.ingress.k8s.ngrok.com +spec: + group: ingress.k8s.ngrok.com + names: + kind: Tunnel + listKind: TunnelList + plural: tunnels + singular: tunnel + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Service/port to forward to + jsonPath: .spec.forwardsTo + name: ForwardsTo + type: string + - description: Age + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: Tunnel is the Schema for the tunnels API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: TunnelSpec defines the desired state of Tunnel + properties: + backend: + description: The configuration for backend connections to services + properties: + protocol: + type: string + type: object + forwardsTo: + description: ForwardsTo is the name and port of the service to forward + traffic to + type: string + labels: + additionalProperties: + type: string + description: Labels are key/value pairs that are attached to the tunnel + type: object + type: object + status: + description: TunnelStatus defines the observed state of Tunnel + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/credentials-secret.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/credentials-secret.yaml new file mode 100644 index 000000000..c6c7286ea --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/credentials-secret.yaml @@ -0,0 +1,11 @@ +{{- if or (not (empty .Values.credentials.apiKey)) (not (empty .Values.credentials.authtoken)) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "kubernetes-ingress-controller.credentialsSecretName" .}} + namespace: {{ .Release.Namespace }} +type: Opaque +data: + API_KEY: {{ required "An ngrok API key is required" .Values.credentials.apiKey | b64enc }} + AUTHTOKEN: {{ required "An ngrok Authtoken is required" .Values.credentials.authtoken | b64enc }} +{{ end }} diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/ingress-class.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/ingress-class.yaml new file mode 100644 index 000000000..0932b4705 --- /dev/null +++ 
b/charts/ngrok/kubernetes-ingress-controller/templates/ingress-class.yaml @@ -0,0 +1,15 @@ +{{- if .Values.ingressClass.create -}} +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + {{- include "kubernetes-ingress-controller.labels" . | nindent 4 }} + app.kubernetes.io/component: controller + name: {{ .Values.ingressClass.name }} + {{- if .Values.ingressClass.default }} + annotations: + ingressclass.kubernetes.io/is-default-class: "true" + {{- end }} +spec: + controller: {{ .Values.controllerName }} +{{- end}} diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/rbac/domain_editor_role.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/domain_editor_role.yaml new file mode 100644 index 000000000..88e527a3f --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/domain_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit domains. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "kubernetes-ingress-controller.labels" . | nindent 4 }} + app.kubernetes.io/component: rbac + name: {{ include "kubernetes-ingress-controller.fullname" . }}-domain-editor-role +rules: +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - domains + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - domains/status + verbs: + - get diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/rbac/domain_viewer_role.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/domain_viewer_role.yaml new file mode 100644 index 000000000..bf5f5196e --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/domain_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view domains. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "kubernetes-ingress-controller.labels" . 
| nindent 4 }} + app.kubernetes.io/component: rbac + name: {{ include "kubernetes-ingress-controller.fullname" . }}-domain-viewer-role +rules: +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - domains + verbs: + - get + - list + - watch +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - domains/status + verbs: + - get diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/rbac/httpsedge_editor_role.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/httpsedge_editor_role.yaml new file mode 100644 index 000000000..d4d383154 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/httpsedge_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit httpsedges. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: httpsedge-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: ngrok-ingress-controller + app.kubernetes.io/part-of: ngrok-ingress-controller + app.kubernetes.io/managed-by: kustomize + name: httpsedge-editor-role +rules: +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - httpsedges + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - httpsedges/status + verbs: + - get diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/rbac/httpsedge_viewer_role.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/httpsedge_viewer_role.yaml new file mode 100644 index 000000000..8d01cd726 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/httpsedge_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view httpsedges. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: httpsedge-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: ngrok-ingress-controller + app.kubernetes.io/part-of: ngrok-ingress-controller + app.kubernetes.io/managed-by: kustomize + name: httpsedge-viewer-role +rules: +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - httpsedges + verbs: + - get + - list + - watch +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - httpsedges/status + verbs: + - get diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/rbac/ippolicy_editor_role.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/ippolicy_editor_role.yaml new file mode 100644 index 000000000..a8aa5ebe6 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/ippolicy_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit ippolicies. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: ippolicy-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: ngrok-ingress-controller + app.kubernetes.io/part-of: ngrok-ingress-controller + app.kubernetes.io/managed-by: kustomize + name: ippolicy-editor-role +rules: +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - ippolicies + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - ippolicies/status + verbs: + - get diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/rbac/ippolicy_viewer_role.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/ippolicy_viewer_role.yaml new file mode 100644 index 000000000..a83a34ab6 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/ippolicy_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions 
for end users to view ippolicies. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: ippolicy-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: ngrok-ingress-controller + app.kubernetes.io/part-of: ngrok-ingress-controller + app.kubernetes.io/managed-by: kustomize + name: ippolicy-viewer-role +rules: +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - ippolicies + verbs: + - get + - list + - watch +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - ippolicies/status + verbs: + - get diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/rbac/ngrokmoduleset_editor_role.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/ngrokmoduleset_editor_role.yaml new file mode 100644 index 000000000..c8d5f1631 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/ngrokmoduleset_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit ngrokmodulesets. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: ngrokmoduleset-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubernetes-ingress-controller + app.kubernetes.io/part-of: kubernetes-ingress-controller + app.kubernetes.io/managed-by: kustomize + name: ngrokmoduleset-editor-role +rules: +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - ngrokmodulesets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - ngrokmodulesets/status + verbs: + - get diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/rbac/ngrokmoduleset_viewer_role.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/ngrokmoduleset_viewer_role.yaml new file mode 100644 index 000000000..a9948ad44 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/ngrokmoduleset_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view ngrokmodulesets. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: ngrokmoduleset-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: kubernetes-ingress-controller + app.kubernetes.io/part-of: kubernetes-ingress-controller + app.kubernetes.io/managed-by: kustomize + name: ngrokmoduleset-viewer-role +rules: +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - ngrokmodulesets + verbs: + - get + - list + - watch +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - ngrokmodulesets/status + verbs: + - get diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/rbac/role.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/role.yaml new file mode 100644 index 000000000..f514256ff --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/role.yaml @@ -0,0 +1,205 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: ngrok-ingress-controller-manager-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - update + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - domains + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - domains/finalizers + verbs: + - update +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - domains/status + verbs: + - get + - patch + - update +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - httpsedges + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - 
httpsedges/finalizers + verbs: + - update +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - httpsedges/status + verbs: + - get + - patch + - update +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - ippolicies + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - ippolicies/finalizers + verbs: + - update +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - ippolicies/status + verbs: + - get + - patch + - update +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - ngrokmodulesets + verbs: + - get + - list + - watch +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - tcpedges + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - tcpedges/finalizers + verbs: + - update +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - tcpedges/status + verbs: + - get + - patch + - update +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - tunnels + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - tunnels/finalizers + verbs: + - update +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - tunnels/status + verbs: + - get + - patch + - update +- apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - update + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - get + - list + - update + - watch diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/rbac/tcpedge_editor_role.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/tcpedge_editor_role.yaml new file mode 100644 index 000000000..2e49847f8 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/tcpedge_editor_role.yaml @@ -0,0 
+1,31 @@ +# permissions for end users to edit tcpedges. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: tcpedge-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: ngrok-ingress-controller + app.kubernetes.io/part-of: ngrok-ingress-controller + app.kubernetes.io/managed-by: kustomize + name: tcpedge-editor-role +rules: +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - tcpedges + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - tcpedges/status + verbs: + - get diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/rbac/tcpedge_viewer_role.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/tcpedge_viewer_role.yaml new file mode 100644 index 000000000..b8eb5ef1f --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/tcpedge_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view tcpedges. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: tcpedge-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: ngrok-ingress-controller + app.kubernetes.io/part-of: ngrok-ingress-controller + app.kubernetes.io/managed-by: kustomize + name: tcpedge-viewer-role +rules: +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - tcpedges + verbs: + - get + - list + - watch +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - tcpedges/status + verbs: + - get diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/rbac/tunnel_editor_role.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/tunnel_editor_role.yaml new file mode 100644 index 000000000..ab7275165 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/tunnel_editor_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to edit tunnels. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "kubernetes-ingress-controller.labels" . | nindent 4 }} + app.kubernetes.io/component: rbac + name: {{ include "kubernetes-ingress-controller.fullname" . }}-tunnel-editor-role +rules: +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - tunnels + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - tunnels/status + verbs: + - get diff --git a/charts/ngrok/kubernetes-ingress-controller/templates/rbac/tunnel_viewer_role.yaml b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/tunnel_viewer_role.yaml new file mode 100644 index 000000000..dfdb4b6f0 --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/templates/rbac/tunnel_viewer_role.yaml @@ -0,0 +1,23 @@ +# permissions for end users to view tunnels. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + {{- include "kubernetes-ingress-controller.labels" . | nindent 4 }} + app.kubernetes.io/component: rbac + name: {{ include "kubernetes-ingress-controller.fullname" . }}-tunnel-viewer-role +rules: +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - tunnels + verbs: + - get + - list + - watch +- apiGroups: + - ingress.k8s.ngrok.com + resources: + - tunnels/status + verbs: + - get diff --git a/charts/ngrok/kubernetes-ingress-controller/values.yaml b/charts/ngrok/kubernetes-ingress-controller/values.yaml new file mode 100644 index 000000000..7907a1dcd --- /dev/null +++ b/charts/ngrok/kubernetes-ingress-controller/values.yaml @@ -0,0 +1,191 @@ +## @section Common parameters +## + +## @param nameOverride String to partially override generated resource names +## @param fullnameOverride String to fully override generated resource names +## @param commonLabels Labels to add to all deployed objects +## @param commonAnnotations Annotations to add to all deployed objects +## +nameOverride: "" +fullnameOverride: "" +commonLabels: {} +commonAnnotations: {} + +## @section Controller parameters +## + +## @param podAnnotations Used to apply custom annotations to the ingress pods. +## @param podLabels Used to apply custom labels to the ingress pods. +## +podAnnotations: {} +podLabels: {} + +## @param replicaCount The number of controllers to run. +## A minimum of 2 is recommended in production for HA. +## +replicaCount: 1 + +## @param image.registry The ngrok ingress controller image registry. +## @param image.repository The ngrok ingress controller image repository. +## @param image.tag The ngrok ingress controller image tag. Defaults to the chart's appVersion if not specified +## @param image.pullPolicy The ngrok ingress controller image pull policy. +## @param image.pullSecrets An array of imagePullSecrets to be used when pulling the image. 
+image: + registry: docker.io + repository: ngrok/kubernetes-ingress-controller + tag: "" + pullPolicy: IfNotPresent + ## Example + ## pullSecrets: + ## - name: my-imagepull-secret + ## + pullSecrets: [] + +## @param ingressClass.name The name of the ingress class to use. +## @param ingressClass.create Whether to create the ingress class. +## @param ingressClass.default Whether to set the ingress class as default. +ingressClass: + name: ngrok + create: true + default: false + +## @param controllerName The name of the controller to look for matching ingress classes +controllerName: "k8s.ngrok.com/ingress-controller" + +## @param watchNamespace The namespace to watch for ingress resources. Defaults to all +watchNamespace: "" + +## @param credentials.secret.name The name of the secret the credentials are in. If not provided, one will be generated using the helm release name. +## @param credentials.apiKey Your ngrok API key. If provided, it will be will be written to the secret and the authtoken must be provided as well. +## @param credentials.authtoken Your ngrok authtoken. If provided, it will be will be written to the secret and the apiKey must be provided as well. +credentials: + secret: + name: "" + apiKey: "" + authtoken: "" + +## @param region ngrok region to create tunnels in. Defaults to connect to the closest geographical region. +region: "" + +## @param serverAddr This is the URL of the ngrok server to connect to. You should set this if you are using a custom ingress URL. +serverAddr: "" + +## @param metaData This is a map of key/value pairs that will be added as meta data to all ngrok api resources created +metaData: {} + +## @param affinity Affinity for the controller pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} +## @param podAffinityPreset Pod affinity preset. 
Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + +## @param priorityClassName Priority class for pod scheduling +## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass +priorityClassName: "" + +## Pod Disruption Budget configuration +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +## @param podDisruptionBudget.create Enable a Pod Disruption Budget creation +## @param podDisruptionBudget.minAvailable Minimum number/percentage of pods that should remain scheduled +## @param podDisruptionBudget.maxUnavailable Maximum number/percentage of pods that may be made unavailable +## +podDisruptionBudget: + create: false + minAvailable: "" + maxUnavailable: 1 + +## Controller container resource requests and limits +## ref: https://kubernetes.io/docs/user-guide/compute-resources/ +## We usually recommend not to specify default resources and to leave this as a conscious +## choice for the user. This also increases chances charts run on environments with little +## resources, such as Minikube. If you do want to specify resources, uncomment the following +## lines, adjust them as necessary, and remove the curly braces after 'resources:'. +## @param resources.limits The resources limits for the container +## @param resources.requests The requested resources for the container +## +resources: + ## Example: + ## limits: + ## cpu: 100m + ## memory: 128Mi + ## + limits: {} + ## Examples: + ## requests: + ## cpu: 100m + ## memory: 128Mi + ## + requests: {} + + +## @param extraVolumes An array of extra volumes to add to the controller. +extraVolumes: [] +## @param extraVolumeMounts An array of extra volume mounts to add to the controller. +extraVolumeMounts: [] +## +## Example: +## +## extraVolumes: +## - name: test-volume +## emptyDir: {} +## extraVolumeMounts: +## - name: test-volume +## mountPath: /test-volume + + +## @param extraEnv an object of extra environment variables to add to the controller. 
+extraEnv: {} +## Example: +## MY_VAR: test +## MY_SECRET_VAR: +## secretKeyRef: +## key: test-key +## value: test-value + +## Controller Service Account Settings +## @param serviceAccount.create Specifies whether a ServiceAccount should be created +## @param serviceAccount.name The name of the ServiceAccount to use. +## If not set and create is true, a name is generated using the fullname template +## @param serviceAccount.annotations Additional annotations to add to the ServiceAccount +## +serviceAccount: + create: true + name: "" + annotations: {} + + +## Logging configuration +## @param log.level The level to log at. One of 'debug', 'info', or 'error'. +## @param log.stacktraceLevel The level to report stacktrace logs one of 'info' or 'error'. +## @param log.format The log format to use. One of console, json. +log: + format: json + level: info + stacktraceLevel: error diff --git a/charts/redpanda/redpanda/Chart.lock b/charts/redpanda/redpanda/Chart.lock index 70eaa0151..600b26781 100644 --- a/charts/redpanda/redpanda/Chart.lock +++ b/charts/redpanda/redpanda/Chart.lock @@ -6,4 +6,4 @@ dependencies: repository: https://charts.redpanda.com version: 0.1.5 digest: sha256:94c1a5a4f7d20096c89eca271067220c3f02e23d2738923ce009ea411bcff028 -generated: "2023-09-28T20:04:52.501304722Z" +generated: "2023-10-03T22:38:01.853283583Z" diff --git a/charts/redpanda/redpanda/Chart.yaml b/charts/redpanda/redpanda/Chart.yaml index 5b7cf7126..2399f11fd 100644 --- a/charts/redpanda/redpanda/Chart.yaml +++ b/charts/redpanda/redpanda/Chart.yaml @@ -37,4 +37,4 @@ name: redpanda sources: - https://github.com/redpanda-data/helm-charts type: application -version: 5.5.3 +version: 5.6.0 diff --git a/charts/redpanda/redpanda/ci/21-eks-tiered-storage-with-creds-values.yaml.tpl b/charts/redpanda/redpanda/ci/21-eks-tiered-storage-with-creds-values.yaml.tpl index c5f9490fb..da8d6f5a8 100644 --- a/charts/redpanda/redpanda/ci/21-eks-tiered-storage-with-creds-values.yaml.tpl +++ 
b/charts/redpanda/redpanda/ci/21-eks-tiered-storage-with-creds-values.yaml.tpl @@ -14,13 +14,14 @@ # limitations under the License. --- storage: - tieredConfig: - cloud_storage_enabled: true - cloud_storage_credentials_source: config_file - cloud_storage_access_key: "${AWS_ACCESS_KEY_ID}" - cloud_storage_secret_key: "${AWS_SECRET_ACCESS_KEY}" - cloud_storage_region: "${AWS_REGION}" - cloud_storage_bucket: "${TEST_BUCKET}" - cloud_storage_segment_max_upload_interval_sec: 1 + tiered: + config: + cloud_storage_enabled: true + cloud_storage_credentials_source: config_file + cloud_storage_access_key: "${AWS_ACCESS_KEY_ID}" + cloud_storage_secret_key: "${AWS_SECRET_ACCESS_KEY}" + cloud_storage_region: "${AWS_REGION}" + cloud_storage_bucket: "${TEST_BUCKET}" + cloud_storage_segment_max_upload_interval_sec: 1 enterprise: license: "${REDPANDA_SAMPLE_LICENSE}" \ No newline at end of file diff --git a/charts/redpanda/redpanda/ci/22-gke-tiered-storage-with-creds-values.yaml.tpl b/charts/redpanda/redpanda/ci/22-gke-tiered-storage-with-creds-values.yaml.tpl index 1fcc1413a..f456972ff 100644 --- a/charts/redpanda/redpanda/ci/22-gke-tiered-storage-with-creds-values.yaml.tpl +++ b/charts/redpanda/redpanda/ci/22-gke-tiered-storage-with-creds-values.yaml.tpl @@ -14,15 +14,16 @@ # limitations under the License. 
--- storage: - tieredConfig: - cloud_storage_enabled: true - cloud_storage_api_endpoint: storage.googleapis.com - cloud_storage_credentials_source: config_file - cloud_storage_region: "US-WEST1" - cloud_storage_bucket: "${TEST_BUCKET}" - cloud_storage_segment_max_upload_interval_sec: 1 - cloud_storage_access_key: "${GCP_ACCESS_KEY_ID}" - cloud_storage_secret_key: "${GCP_SECRET_ACCESS_KEY}" + tiered: + config: + cloud_storage_enabled: true + cloud_storage_api_endpoint: storage.googleapis.com + cloud_storage_credentials_source: config_file + cloud_storage_region: "US-WEST1" + cloud_storage_bucket: "${TEST_BUCKET}" + cloud_storage_segment_max_upload_interval_sec: 1 + cloud_storage_access_key: "${GCP_ACCESS_KEY_ID}" + cloud_storage_secret_key: "${GCP_SECRET_ACCESS_KEY}" enterprise: license: "${REDPANDA_SAMPLE_LICENSE}" diff --git a/charts/redpanda/redpanda/ci/23-aks-tiered-storage-with-creds-values.yaml.tpl b/charts/redpanda/redpanda/ci/23-aks-tiered-storage-with-creds-values.yaml.tpl index f45186e23..e559095d7 100644 --- a/charts/redpanda/redpanda/ci/23-aks-tiered-storage-with-creds-values.yaml.tpl +++ b/charts/redpanda/redpanda/ci/23-aks-tiered-storage-with-creds-values.yaml.tpl @@ -16,13 +16,16 @@ storage: persistentVolume: storageClass: managed-csi - tieredConfig: - cloud_storage_enabled: true - cloud_storage_credentials_source: config_file - cloud_storage_segment_max_upload_interval_sec: 1 - cloud_storage_azure_storage_account: ${TEST_STORAGE_ACCOUNT} - cloud_storage_azure_container: ${TEST_STORAGE_CONTAINER} - cloud_storage_azure_shared_key: ${TEST_AZURE_SHARED_KEY} + tiered: + persistentVolume: + storageClass: managed-csi + config: + cloud_storage_enabled: true + cloud_storage_credentials_source: config_file + cloud_storage_segment_max_upload_interval_sec: 1 + cloud_storage_azure_storage_account: ${TEST_STORAGE_ACCOUNT} + cloud_storage_azure_container: ${TEST_STORAGE_CONTAINER} + cloud_storage_azure_shared_key: ${TEST_AZURE_SHARED_KEY} enterprise: license: 
"${REDPANDA_SAMPLE_LICENSE}" diff --git a/charts/redpanda/redpanda/templates/NOTES.txt b/charts/redpanda/redpanda/templates/NOTES.txt index d56ae7635..686f8c345 100644 --- a/charts/redpanda/redpanda/templates/NOTES.txt +++ b/charts/redpanda/redpanda/templates/NOTES.txt @@ -37,11 +37,7 @@ Any rpk command that's given to the user in in this file must be defined in _exa {{- $anySASL := (include "sasl-enabled" . | fromJson).bool }} {{- $rpk := deepCopy . }} -{{- $_ := set $rpk "rpk" ( - printf "kubectl -n %s exec -ti %s-0 -c redpanda -- rpk" - .Release.Namespace - (include "redpanda.fullname" .)) -}} +{{- $_ := set $rpk "rpk" "rpk" }} Congratulations on installing {{ .Chart.Name }}! @@ -56,18 +52,42 @@ If you are using the load balancer service with a cloud provider, the services w {{ printf "helm upgrade %s redpanda/redpanda -n %s --set $(kubectl get svc -n %s -o jsonpath='{\"external.addresses={\"}{ range .items[*]}{.status.loadBalancer.ingress[0].ip }{.status.loadBalancer.ingress[0].hostname}{\",\"}{ end }{\"}\\n\"}')" (include "redpanda.name" .) .Release.Namespace .Release.Namespace }} {{- end }} +Set up rpk for access to your external listeners: +{{- $profile := keys .Values.listeners.kafka.external | first -}} +{{ if (include "tls-enabled" . | fromJson).bool }} + {{- $external := dig "tls" "cert" .Values.listeners.kafka.tls.cert (get .Values.listeners.kafka.external $profile )}} + kubectl get secret -n {{ .Release.Namespace }} {{ include "redpanda.fullname" . }}-{{ $external }}-cert -o go-template='{{ "{{" }} index .data "ca.crt" | base64decode }}' > ca.crt + {{- if or .Values.listeners.kafka.tls.requireClientAuth .Values.listeners.admin.tls.requireClientAuth }} + kubectl get secret -n {{ .Release.Namespace }} {{ include "redpanda.fullname" . }}-client -o go-template='{{ "{{" }} index .data "tls.crt" | base64decode }}' > tls.crt + kubectl get secret -n {{ .Release.Namespace }} {{ include "redpanda.fullname" . 
}}-client -o go-template='{{ "{{" }} index .data "tls.key" | base64decode }}' > tls.key + {{- end }} +{{- end }} + rpk profile create --from-profile <(kubectl get configmap -n {{ .Release.Namespace }} {{ include "redpanda.fullname" . }}-rpk -o go-template='{{ "{{" }} .data.profile }}') {{ $profile }} + +Set up dns to look up the pods on their Kubernetes Nodes. You can use this query to get the list of short-names to IP addresses. Add your external domain to the hostnames and you could test by adding these to your /etc/hosts: + + kubectl get pod -n {{ .Release.Namespace }} -o custom-columns=node:.status.hostIP,name:.metadata.name --no-headers -l app.kubernetes.io/name=redpanda,app.kubernetes.io/component=redpanda-statefulset + +{{- if and $anySASL }} + +Set the credentials in the environment: + + kubectl -n {{ .Release.Namespace }} get secret {{ .Values.auth.sasl.secretRef }} -o go-template="{{ "{{" }} range .data }}{{ "{{" }} . | base64decode }}{{ "{{" }} end }}" | IFS=: read -r {{ include "rpk-sasl-environment-variables" . }} + export {{ include "rpk-sasl-environment-variables" . }} + +{{- end }} + Try some sample commands: {{- if and $anySASL }} -{{- $_ := set $rpk "dummySasl" true }} - Create a user: {{ include "rpk-acl-user-create" $rpk }} Give the user permissions: - {{ include "rpk-acl-create" $rpk }} + {{ include "rpk-acl-create" $rpk }} + {{- end }} Get the api status: diff --git a/charts/redpanda/redpanda/templates/_configmap.tpl b/charts/redpanda/redpanda/templates/_configmap.tpl index fcc3a9586..e29e33507 100644 --- a/charts/redpanda/redpanda/templates/_configmap.tpl +++ b/charts/redpanda/redpanda/templates/_configmap.tpl @@ -26,9 +26,9 @@ limitations under the License. It's impossible to do a rolling upgrade from not-tls-enabled rpc to tls-enabled rpc. 
*/ -}} {{- $check := list - (include "redpanda-atleast-23-1-2" .|fromJson).bool - (include "redpanda-22-3-atleast-22-3-13" .|fromJson).bool - (include "redpanda-22-2-atleast-22-2-10" .|fromJson).bool + (include "redpanda-atleast-23-1-2" .|fromJson).bool + (include "redpanda-22-3-atleast-22-3-13" .|fromJson).bool + (include "redpanda-22-2-atleast-22-2-10" .|fromJson).bool -}} {{- $wantedRPCTLS := (include "rpc-tls-enabled" . | fromJson).bool -}} {{- if and (not (mustHas true $check)) $wantedRPCTLS -}} @@ -61,23 +61,23 @@ limitations under the License. {{- end -}} {{- end -}} - bootstrap.yaml: | - kafka_enable_authorization: {{ (include "sasl-enabled" . | fromJson).bool }} - enable_sasl: {{ (include "sasl-enabled" . | fromJson).bool }} - enable_rack_awareness: {{ .Values.rackAwareness.enabled }} - {{- if $users }} - superusers: {{ toJson $users }} - {{- end }} - {{- with (dig "cluster" dict .Values.config) }} - {{- range $key, $element := .}} - {{- if or (eq (typeOf $element) "bool") $element }} - {{ $key }}: {{ $element | toYaml }} - {{- end }} +bootstrap.yaml: | + kafka_enable_authorization: {{ (include "sasl-enabled" . | fromJson).bool }} + enable_sasl: {{ (include "sasl-enabled" . | fromJson).bool }} + enable_rack_awareness: {{ .Values.rackAwareness.enabled }} +{{- with $users }} + superusers: {{ toYaml . | nindent 4 }} +{{- end }} +{{- with (dig "cluster" dict .Values.config) }} + {{- range $key, $element := .}} + {{- if or (eq (typeOf $element) "bool") $element }} + {{- dict $key $element | toYaml | nindent 2 }} {{- end }} + {{- end }} {{- end }} - {{- include "tunable" . }} + {{- include "tunable" . | nindent 2 }} {{- if and (not (hasKey .Values.config.cluster "storage_min_free_bytes")) ((include "redpanda-atleast-22-2-0" . | fromJson).bool) }} - storage_min_free_bytes: {{ include "storage-min-free-bytes" . }} + storage_min_free_bytes: {{ include "storage-min-free-bytes" . }} {{- end }} {{- if and (include "is-licensed" . 
| fromJson).bool (include "storage-tiered-config" .|fromJson).cloud_storage_enabled }} {{- $tieredStorageConfig := (include "storage-tiered-config" .|fromJson) }} @@ -86,84 +86,85 @@ limitations under the License. {{- $tieredStorageConfig = unset $tieredStorageConfig "cloud_storage_credentials_source"}} {{- end }} {{- range $key, $element := $tieredStorageConfig}} + {{- if or (eq (typeOf $element) "bool") $element }} + {{- dict $key $element | toYaml | nindent 2 }} + {{- end }} + {{- end }} +{{- end }} + +redpanda.yaml: | + config_file: /etc/redpanda/redpanda.yaml +{{- if .Values.logging.usageStats.enabled }} + {{- with (dig "usageStats" "organization" "" .Values.logging) }} + organization: {{ . }} + {{- end }} + {{- with (dig "usageStats" "clusterId" "" .Values.logging) }} + cluster_id: {{ . }} + {{- end }} +{{- end }} + redpanda: +{{- if (include "redpanda-atleast-22-3-0" . | fromJson).bool }} + empty_seed_starts_cluster: false +{{- end }} + kafka_enable_authorization: {{ (include "sasl-enabled" . | fromJson).bool }} + enable_sasl: {{ (include "sasl-enabled" . | fromJson).bool }} +{{- if $users }} + superusers: {{ toJson $users }} +{{- end }} +{{- with (dig "cluster" dict .Values.config) }} + {{- range $key, $element := . }} {{- if or (eq (typeOf $element) "bool") $element }} {{ $key }}: {{ $element | toYaml }} {{- end }} {{- end }} {{- end }} - redpanda.yaml: | - config_file: /etc/redpanda/redpanda.yaml -{{- if .Values.logging.usageStats.enabled }} - {{- with (dig "usageStats" "organization" "" .Values.logging) }} - organization: {{ . }} - {{- end }} - {{- with (dig "usageStats" "clusterId" "" .Values.logging) }} - cluster_id: {{ . }} +{{- with (dig "tunable" dict .Values.config) }} + {{- range $key, $element := .}} + {{- if or (eq (typeOf $element) "bool") $element }} + {{ $key }}: {{ $element | toYaml }} + {{- end }} {{- end }} {{- end }} - redpanda: -{{- if (include "redpanda-atleast-22-3-0" . 
| fromJson).bool }} - empty_seed_starts_cluster: false +{{- if not (hasKey .Values.config.cluster "storage_min_free_bytes") }} + storage_min_free_bytes: {{ include "storage-min-free-bytes" . }} {{- end }} - kafka_enable_authorization: {{ (include "sasl-enabled" . | fromJson).bool }} - enable_sasl: {{ (include "sasl-enabled" . | fromJson).bool }} - {{- if $users }} - superusers: {{ toJson $users }} +{{- with dig "node" dict .Values.config }} + {{- range $key, $element := .}} + {{- if and (or (eq (typeOf $element) "bool") $element) (and (eq $key "crash_loop_limit") (include "redpanda-atleast-23-1-1" $root | fromJson).bool) }} + {{ $key }}: {{ $element | toYaml }} + {{- end }} {{- end }} - {{- with (dig "cluster" dict .Values.config) }} - {{- range $key, $element := . }} - {{- if or (eq (typeOf $element) "bool") $element }} - {{ $key }}: {{ $element | toYaml }} - {{- end }} - {{- end }} - {{- end }} - {{- with (dig "tunable" dict .Values.config) }} - {{- range $key, $element := .}} - {{- if or (eq (typeOf $element) "bool") $element }} - {{ $key }}: {{ $element | toYaml }} - {{- end }} - {{- end }} - {{- end }} - {{- if not (hasKey .Values.config.cluster "storage_min_free_bytes") }} - storage_min_free_bytes: {{ include "storage-min-free-bytes" . 
}} - {{- end }} - {{- with dig "node" dict .Values.config }} - {{- range $key, $element := .}} - {{- if and (or (eq (typeOf $element) "bool") $element) (and (eq $key "crash_loop_limit") (include "redpanda-atleast-23-1-1" $root | fromJson).bool) }} - {{ $key }}: {{ $element | toYaml }} - {{- end }} - {{- end }} - {{- end }} -{{- /* LISTENERS */}} -{{- /* Admin API */}} +{{- end -}} +{{/* LISTENERS */}} +{{/* Admin API */}} {{- $service := .Values.listeners.admin }} - admin: - - name: internal - address: 0.0.0.0 - port: {{ $service.port }} + admin: + - name: internal + address: 0.0.0.0 + port: {{ $service.port }} {{- range $name, $listener := $service.external }} -{{- if and $listener.port $name }} - - name: {{ $name }} - address: 0.0.0.0 - port: {{ $listener.port }} + {{- if and $listener.port $name }} + - name: {{ $name }} + address: 0.0.0.0 + port: {{ $listener.port }} + {{- end }} {{- end }} -{{- end }} - admin_api_tls: + admin_api_tls: {{- if (include "admin-internal-tls-enabled" . | fromJson).bool }} - - name: internal - enabled: true - cert_file: /etc/tls/certs/{{ $service.tls.cert }}/tls.crt - key_file: /etc/tls/certs/{{ $service.tls.cert }}/tls.key - require_client_auth: {{ $service.tls.requireClientAuth }} + - name: internal + enabled: true + cert_file: /etc/tls/certs/{{ $service.tls.cert }}/tls.crt + key_file: /etc/tls/certs/{{ $service.tls.cert }}/tls.key + require_client_auth: {{ $service.tls.requireClientAuth }} {{- $cert := get .Values.tls.certs $service.tls.cert }} {{- if empty $cert }} {{- fail (printf "Certificate, '%s', used but not defined")}} {{- end }} {{- if $cert.caEnabled }} - truststore_file: /etc/tls/certs/{{ $service.tls.cert }}/ca.crt + truststore_file: /etc/tls/certs/{{ $service.tls.cert }}/ca.crt {{- else }} - {{- /* This is a required field so we use the default in the redpanda debian container */}} - truststore_file: /etc/ssl/certs/ca-certificates.crt + {{/* This is a required field so we use the default in the redpanda debian 
container */}} + truststore_file: /etc/ssl/certs/ca-certificates.crt {{- end }} {{- end }} {{- range $name, $listener := $service.external }} @@ -177,52 +178,52 @@ limitations under the License. {{- if empty $cert }} {{- fail (printf "Certificate, '%s', used but not defined" $certName)}} {{- end }} - - name: {{ $name }} - enabled: true - cert_file: {{ $certPath }}/tls.crt - key_file: {{ $certPath }}/tls.key - require_client_auth: {{ $mtls }} + - name: {{ $name }} + enabled: true + cert_file: {{ $certPath }}/tls.crt + key_file: {{ $certPath }}/tls.key + require_client_auth: {{ $mtls }} {{- if $cert.caEnabled }} - truststore_file: {{ $certPath }}/ca.crt + truststore_file: {{ $certPath }}/ca.crt {{- else }} - {{- /* This is a required field so we use the default in the redpanda debian container */}} - truststore_file: /etc/ssl/certs/ca-certificates.crt + {{- /* This is a required field so we use the default in the redpanda debian container */}} + truststore_file: /etc/ssl/certs/ca-certificates.crt {{- end }} {{- end }} -{{- end }} -{{- /* Kafka API */}} +{{- end -}} +{{/* Kafka API */}} {{- $kafkaService := .Values.listeners.kafka }} - kafka_api: - - name: internal - address: 0.0.0.0 - port: {{ $kafkaService.port }} - {{- if or (include "sasl-enabled" $root | fromJson).bool $kafkaService.authenticationMethod }} - authentication_method: {{ default "sasl" $kafkaService.authenticationMethod }} - {{- end }} -{{- range $name, $listener := $kafkaService.external }} - - name: {{ $name }} - address: 0.0.0.0 - port: {{ $listener.port }} - {{- if or (include "sasl-enabled" $root | fromJson).bool $listener.authenticationMethod }} - authentication_method: {{ default "sasl" $listener.authenticationMethod }} - {{- end }} + kafka_api: + - name: internal + address: 0.0.0.0 + port: {{ $kafkaService.port }} +{{- if or (include "sasl-enabled" $root | fromJson).bool $kafkaService.authenticationMethod }} + authentication_method: {{ default "sasl" $kafkaService.authenticationMethod }} {{- 
end }} - kafka_api_tls: +{{- range $name, $listener := $kafkaService.external }} + - name: {{ $name }} + address: 0.0.0.0 + port: {{ $listener.port }} + {{- if or (include "sasl-enabled" $root | fromJson).bool $listener.authenticationMethod }} + authentication_method: {{ default "sasl" $listener.authenticationMethod }} + {{- end }} +{{- end }} + kafka_api_tls: {{- if (include "kafka-internal-tls-enabled" . | fromJson).bool }} - - name: internal - enabled: true - cert_file: /etc/tls/certs/{{ $kafkaService.tls.cert }}/tls.crt - key_file: /etc/tls/certs/{{ $kafkaService.tls.cert }}/tls.key - require_client_auth: {{ $kafkaService.tls.requireClientAuth }} + - name: internal + enabled: true + cert_file: /etc/tls/certs/{{ $kafkaService.tls.cert }}/tls.crt + key_file: /etc/tls/certs/{{ $kafkaService.tls.cert }}/tls.key + require_client_auth: {{ $kafkaService.tls.requireClientAuth }} {{- $cert := get .Values.tls.certs $kafkaService.tls.cert }} {{- if empty $cert }} {{- fail (printf "Certificate, '%s', used but not defined")}} {{- end }} {{- if $cert.caEnabled }} - truststore_file: /etc/tls/certs/{{ $kafkaService.tls.cert }}/ca.crt + truststore_file: /etc/tls/certs/{{ $kafkaService.tls.cert }}/ca.crt {{- else }} - {{- /* This is a required field so we use the default in the redpanda debian container */}} - truststore_file: /etc/ssl/certs/ca-certificates.crt + {{/* This is a required field so we use the default in the redpanda debian container */}} + truststore_file: /etc/ssl/certs/ca-certificates.crt {{- end }} {{- end }} {{- range $name, $listener := $kafkaService.external }} @@ -236,96 +237,95 @@ limitations under the License. 
{{- if empty $cert }} {{- fail (printf "Certificate, '%s', used but not defined" $certName)}} {{- end }} - - name: {{ $name }} - enabled: true - cert_file: {{ $certPath }}/tls.crt - key_file: {{ $certPath }}/tls.key - require_client_auth: {{ $mtls }} + - name: {{ $name }} + enabled: true + cert_file: {{ $certPath }}/tls.crt + key_file: {{ $certPath }}/tls.key + require_client_auth: {{ $mtls }} {{- if $cert.caEnabled }} - truststore_file: {{ $certPath }}/ca.crt + truststore_file: {{ $certPath }}/ca.crt {{- else }} - {{- /* This is a required field so we use the default in the redpanda debian container */}} - truststore_file: /etc/ssl/certs/ca-certificates.crt + {{/* This is a required field so we use the default in the redpanda debian container */}} + truststore_file: /etc/ssl/certs/ca-certificates.crt {{- end }} {{- end }} -{{- end }} -{{- /* RPC Server */}} +{{- end -}} +{{/* RPC Server */}} {{- $service = .Values.listeners.rpc }} - rpc_server: - address: 0.0.0.0 - port: {{ $service.port }} + rpc_server: + address: 0.0.0.0 + port: {{ $service.port }} {{- if (include "rpc-tls-enabled" . 
| fromJson).bool }} - rpc_server_tls: - enabled: true - cert_file: /etc/tls/certs/{{ $service.tls.cert }}/tls.crt - key_file: /etc/tls/certs/{{ $service.tls.cert }}/tls.key - require_client_auth: {{ $service.tls.requireClientAuth }} + rpc_server_tls: + enabled: true + cert_file: /etc/tls/certs/{{ $service.tls.cert }}/tls.crt + key_file: /etc/tls/certs/{{ $service.tls.cert }}/tls.key + require_client_auth: {{ $service.tls.requireClientAuth }} {{- $cert := get .Values.tls.certs $service.tls.cert }} {{- if empty $cert }} {{- fail (printf "Certificate, '%s', used but not defined")}} {{- end }} {{- if $cert.caEnabled }} - truststore_file: /etc/tls/certs/{{ $service.tls.cert }}/ca.crt + truststore_file: /etc/tls/certs/{{ $service.tls.cert }}/ca.crt {{- else }} - {{- /* This is a required field so we use the default in the redpanda debian container */}} - truststore_file: /etc/ssl/certs/ca-certificates.crt + {{- /* This is a required field so we use the default in the redpanda debian container */}} + truststore_file: /etc/ssl/certs/ca-certificates.crt {{- end }} -{{- end }} - seed_servers: -{{- with $root.tempConfigMapServerList -}} - {{- . | trim | nindent 8 }} {{- end -}} +{{- with $root.tempConfigMapServerList }} + seed_servers: {{ toYaml . | nindent 6 }} +{{- end }} {{- if and (include "is-licensed" . | fromJson).bool (include "storage-tiered-config" .|fromJson).cloud_storage_enabled }} {{- $tieredStorageConfig := (include "storage-tiered-config" .|fromJson) }} {{- if not (include "redpanda-atleast-22-3-0" . 
| fromJson).bool }} - {{- $tieredStorageConfig = unset $tieredStorageConfig "cloud_storage_credentials_source"}} + {{- $tieredStorageConfig = unset $tieredStorageConfig "cloud_storage_credentials_source" }} {{- end }} - {{- range $key, $element := $tieredStorageConfig}} + {{- range $key, $element := $tieredStorageConfig }} {{- if or (eq (typeOf $element) "bool") $element }} - {{ $key }}: {{ $element | toYaml }} + {{- dict $key $element | toYaml | nindent 2 -}} {{- end }} {{- end }} {{- end }} -{{- /* Schema Registry API */}} +{{/* Schema Registry API */}} {{- if and .Values.listeners.schemaRegistry.enabled (include "redpanda-22-2-x-without-sasl" $root | fromJson).bool }} {{- $schemaRegistryService := .Values.listeners.schemaRegistry }} - schema_registry_client: - brokers: + schema_registry_client: + brokers: {{- range (include "seed-server-list" $root | mustFromJson) }} - - address: {{ . }} - port: {{ $kafkaService.port }} + - address: {{ . }} + port: {{ $kafkaService.port }} {{- end }} {{- if (include "kafka-internal-tls-enabled" . 
| fromJson).bool }} - broker_tls: - enabled: true - require_client_auth: {{ $kafkaService.tls.requireClientAuth }} - cert_file: /etc/tls/certs/{{ $kafkaService.tls.cert }}/tls.crt - key_file: /etc/tls/certs/{{ $kafkaService.tls.cert }}/tls.key + broker_tls: + enabled: true + require_client_auth: {{ $kafkaService.tls.requireClientAuth }} + cert_file: /etc/tls/certs/{{ $kafkaService.tls.cert }}/tls.crt + key_file: /etc/tls/certs/{{ $kafkaService.tls.cert }}/tls.key {{- $cert := get .Values.tls.certs $kafkaService.tls.cert }} {{- if empty $cert }} {{- fail (printf "Certificate, '%s', used but not defined")}} {{- end }} {{- if $cert.caEnabled }} - truststore_file: /etc/tls/certs/{{ $kafkaService.tls.cert }}/ca.crt + truststore_file: /etc/tls/certs/{{ $kafkaService.tls.cert }}/ca.crt {{- else }} - {{- /* This is a required field so we use the default in the redpanda debian container */}} - truststore_file: /etc/ssl/certs/ca-certificates.crt + {{- /* This is a required field so we use the default in the redpanda debian container */}} + truststore_file: /etc/ssl/certs/ca-certificates.crt {{- end }} {{- end }} {{- with .Values.config.schema_registry_client }} {{- toYaml . 
| nindent 6 }} {{- end }} - schema_registry: - schema_registry_api: - - name: internal - address: 0.0.0.0 - port: {{ $schemaRegistryService.port }} + schema_registry: + schema_registry_api: + - name: internal + address: 0.0.0.0 + port: {{ $schemaRegistryService.port }} {{- if or (include "sasl-enabled" $root | fromJson).bool $schemaRegistryService.authenticationMethod }} - authentication_method: {{ default "http_basic" $schemaRegistryService.authenticationMethod }} + authentication_method: {{ default "http_basic" $schemaRegistryService.authenticationMethod }} {{- end }} {{- range $name, $listener := $schemaRegistryService.external }} - - name: {{ $name }} - address: 0.0.0.0 + - name: {{ $name }} + address: 0.0.0.0 {{- /* when upgrading from an older version that had a missing port, fail if we cannot guess a default this should work in all cases as the older versions would have failed with multiple listeners anyway @@ -333,27 +333,27 @@ limitations under the License. {{- if and (empty $listener.port) (ne (len $schemaRegistryService.external) 1) }} {{- fail "missing required port for schemaRegistry listener $listener.name" }} {{- end }} - port: {{ $listener.port | default 8084 }} + port: {{ $listener.port | default 8084 }} {{- if or (include "sasl-enabled" $root | fromJson).bool $listener.authenticationMethod }} - authentication_method: {{ default "http_basic" $listener.authenticationMethod }} + authentication_method: {{ default "http_basic" $listener.authenticationMethod }} {{- end }} {{- end }} - schema_registry_api_tls: + schema_registry_api_tls: {{- if (include "schemaRegistry-internal-tls-enabled" . 
| fromJson).bool }} - - name: internal - enabled: true - cert_file: /etc/tls/certs/{{ $schemaRegistryService.tls.cert }}/tls.crt - key_file: /etc/tls/certs/{{ $schemaRegistryService.tls.cert }}/tls.key - require_client_auth: {{ $schemaRegistryService.tls.requireClientAuth }} + - name: internal + enabled: true + cert_file: /etc/tls/certs/{{ $schemaRegistryService.tls.cert }}/tls.crt + key_file: /etc/tls/certs/{{ $schemaRegistryService.tls.cert }}/tls.key + require_client_auth: {{ $schemaRegistryService.tls.requireClientAuth }} {{- $cert := get .Values.tls.certs $schemaRegistryService.tls.cert }} {{- if empty $cert }} {{- fail (printf "Certificate, '%s', used but not defined")}} {{- end }} {{- if $cert.caEnabled }} - truststore_file: /etc/tls/certs/{{ $schemaRegistryService.tls.cert }}/ca.crt + truststore_file: /etc/tls/certs/{{ $schemaRegistryService.tls.cert }}/ca.crt {{- else }} - {{- /* This is a required field so we use the default in the redpanda debian container */}} - truststore_file: /etc/ssl/certs/ca-certificates.crt + {{- /* This is a required field so we use the default in the redpanda debian container */}} + truststore_file: /etc/ssl/certs/ca-certificates.crt {{- end }} {{- end }} {{- range $name, $listener := $schemaRegistryService.external }} @@ -367,81 +367,81 @@ limitations under the License. 
{{- if empty $cert }} {{- fail (printf "Certificate, '%s', used but not defined")}} {{- end }} - - name: {{ $name }} - enabled: true - cert_file: {{ $certPath }}/tls.crt - key_file: {{ $certPath }}/tls.key - require_client_auth: {{ $mtls }} + - name: {{ $name }} + enabled: true + cert_file: {{ $certPath }}/tls.crt + key_file: {{ $certPath }}/tls.key + require_client_auth: {{ $mtls }} {{- if $cert.caEnabled }} - truststore_file: {{ $certPath }}/ca.crt + truststore_file: {{ $certPath }}/ca.crt {{- else }} - {{- /* This is a required field so we use the default in the redpanda debian container */}} - truststore_file: /etc/ssl/certs/ca-certificates.crt + {{- /* This is a required field so we use the default in the redpanda debian container */}} + truststore_file: /etc/ssl/certs/ca-certificates.crt {{- end }} {{- end }} {{- end }} -{{- end }} -{{- /* HTTP Proxy */}} +{{- end -}} +{{/* HTTP Proxy */}} {{- if and .Values.listeners.http.enabled (include "redpanda-22-2-x-without-sasl" $root | fromJson).bool }} {{- $HTTPService := .Values.listeners.http }} - pandaproxy_client: - brokers: - {{- range (include "seed-server-list" $root | mustFromJson) }} - - address: {{ . }} - port: {{ $kafkaService.port }} - {{- end }} - {{- if (include "kafka-internal-tls-enabled" . 
| fromJson).bool }} - broker_tls: - enabled: true - require_client_auth: {{ $kafkaService.tls.requireClientAuth }} - cert_file: /etc/tls/certs/{{ $kafkaService.tls.cert }}/tls.crt - key_file: /etc/tls/certs/{{ $kafkaService.tls.cert }}/tls.key - {{- $cert := get .Values.tls.certs $kafkaService.tls.cert }} - {{- if empty $cert }} - {{- fail (printf "Certificate, '%s', used but not defined")}} - {{- end }} - {{- if $cert.caEnabled }} - truststore_file: /etc/tls/certs/{{ $kafkaService.tls.cert }}/ca.crt - {{- else }} - {{- /* This is a required field so we use the default in the redpanda debian container */}} - truststore_file: /etc/ssl/certs/ca-certificates.crt - {{- end }} - {{- with .Values.config.pandaproxy_client }} - {{- toYaml . | nindent 6 }} - {{- end }} - {{- end }} - pandaproxy: - pandaproxy_api: - - name: internal - address: 0.0.0.0 - port: {{ $HTTPService.port }} - {{- if or (include "sasl-enabled" $root | fromJson).bool $HTTPService.authenticationMethod }} - authentication_method: {{ default "http_basic" $HTTPService.authenticationMethod }} - {{- end }} - {{- range $name, $listener := $HTTPService.external }} - - name: {{ $name }} - address: 0.0.0.0 - port: {{ $listener.port }} - {{- if or (include "sasl-enabled" $root | fromJson).bool $listener.authenticationMethod }} - authentication_method: {{ default "http_basic" $listener.authenticationMethod }} - {{- end }} + pandaproxy_client: + brokers: + {{- range (include "seed-server-list" $root | mustFromJson) }} + - address: {{ . }} + port: {{ $kafkaService.port }} {{- end }} - pandaproxy_api_tls: - {{- if (include "http-internal-tls-enabled" . | fromJson).bool }} - - name: internal - enabled: true - cert_file: /etc/tls/certs/{{ $HTTPService.tls.cert }}/tls.crt - key_file: /etc/tls/certs/{{ $HTTPService.tls.cert }}/tls.key - require_client_auth: {{ $HTTPService.tls.requireClientAuth }} + {{- if (include "kafka-internal-tls-enabled" . 
| fromJson).bool }} + broker_tls: + enabled: true + require_client_auth: {{ $kafkaService.tls.requireClientAuth }} + cert_file: /etc/tls/certs/{{ $kafkaService.tls.cert }}/tls.crt + key_file: /etc/tls/certs/{{ $kafkaService.tls.cert }}/tls.key + {{- $cert := get .Values.tls.certs $kafkaService.tls.cert }} + {{- if empty $cert }} + {{- fail (printf "Certificate, '%s', used but not defined")}} + {{- end }} + {{- if $cert.caEnabled }} + truststore_file: /etc/tls/certs/{{ $kafkaService.tls.cert }}/ca.crt + {{- else }} + {{- /* This is a required field so we use the default in the redpanda debian container */}} + truststore_file: /etc/ssl/certs/ca-certificates.crt + {{- end }} + {{- with .Values.config.pandaproxy_client }} + {{- toYaml . | nindent 6 }} + {{- end }} +{{- end }} + pandaproxy: + pandaproxy_api: + - name: internal + address: 0.0.0.0 + port: {{ $HTTPService.port }} +{{- if or (include "sasl-enabled" $root | fromJson).bool $HTTPService.authenticationMethod }} + authentication_method: {{ default "http_basic" $HTTPService.authenticationMethod }} +{{- end }} +{{- range $name, $listener := $HTTPService.external }} + - name: {{ $name }} + address: 0.0.0.0 + port: {{ $listener.port }} + {{- if or (include "sasl-enabled" $root | fromJson).bool $listener.authenticationMethod }} + authentication_method: {{ default "http_basic" $listener.authenticationMethod }} + {{- end }} +{{- end }} + pandaproxy_api_tls: +{{- if (include "http-internal-tls-enabled" . 
| fromJson).bool }} + - name: internal + enabled: true + cert_file: /etc/tls/certs/{{ $HTTPService.tls.cert }}/tls.crt + key_file: /etc/tls/certs/{{ $HTTPService.tls.cert }}/tls.key + require_client_auth: {{ $HTTPService.tls.requireClientAuth }} {{- $cert := get .Values.tls.certs $HTTPService.tls.cert }} {{- if empty $cert }} {{- fail (printf "Certificate, '%s', used but not defined")}} {{- end }} {{- if $cert.caEnabled }} - truststore_file: /etc/tls/certs/{{ $HTTPService.tls.cert }}/ca.crt + truststore_file: /etc/tls/certs/{{ $HTTPService.tls.cert }}/ca.crt {{- else }} - {{- /* This is a required field so we use the default in the redpanda debian container */}} - truststore_file: /etc/ssl/certs/ca-certificates.crt + {{- /* This is a required field so we use the default in the redpanda debian container */}} + truststore_file: /etc/ssl/certs/ca-certificates.crt {{- end }} {{- end }} {{- range $name, $listener := $HTTPService.external }} @@ -455,61 +455,145 @@ limitations under the License. {{- if empty $cert }} {{- fail (printf "Certificate, '%s', used but not defined")}} {{- end }} - - name: {{ $name }} - enabled: true - cert_file: {{ $certPath }}/tls.crt - key_file: {{ $certPath }}/tls.key - require_client_auth: {{ $mtls }} + - name: {{ $name }} + enabled: true + cert_file: {{ $certPath }}/tls.crt + key_file: {{ $certPath }}/tls.key + require_client_auth: {{ $mtls }} {{- if $cert.caEnabled }} - truststore_file: {{ $certPath }}/ca.crt + truststore_file: {{ $certPath }}/ca.crt {{- else }} - {{- /* This is a required field so we use the default in the redpanda debian container */}} - truststore_file: /etc/ssl/certs/ca-certificates.crt + {{- /* This is a required field so we use the default in the redpanda debian container */}} + truststore_file: /etc/ssl/certs/ca-certificates.crt {{- end }} {{- end }} {{- end }} {{- end }} -{{- /* END LISTENERS */}} +{{/* END LISTENERS */}} +{{- end -}} - rpk: -{{- with (dig "rpk" dict .Values.config) }} - {{- . 
| toYaml | nindent 6}} -{{- end }} - enable_usage_stats: {{ .Values.logging.usageStats.enabled }} - overprovisioned: {{ dig "cpu" "overprovisioned" false .Values.resources }} - enable_memory_locking: {{ dig "memory" "enable_memory_locking" false .Values.resources }} -{{- if hasKey .Values.tuning "tune_aio_events" }} - tune_aio_events: {{ .Values.tuning.tune_aio_events }} -{{- end }} -{{- if hasKey .Values.tuning "tune_clocksource" }} - tune_clocksource: {{ .Values.tuning.tune_clocksource }} -{{- end }} -{{- if hasKey .Values.tuning "tune_ballast_file" }} - tune_ballast_file: {{ .Values.tuning.tune_ballast_file }} -{{- end }} -{{- if hasKey .Values.tuning "ballast_file_path" }} - ballast_file_path: {{ .Values.tuning.ballast_file_path }} -{{- end }} -{{- if hasKey .Values.tuning "ballast_file_size" }} - ballast_file_size: {{ .Values.tuning.ballast_file_size }} -{{- end }} -{{- if hasKey .Values.tuning "well_known_io" }} - well_known_io: {{ .Values.tuning.well_known_io }} -{{- end }} +{{- define "rpk-config-internal" -}} + {{- $brokers := list -}} + {{- $admin := list -}} + {{- range $i := untilStep 0 (.Values.statefulset.replicas|int) 1 -}} + {{- $podName := printf "%s-%d.%s" (include "redpanda.fullname" $) $i (include "redpanda.internal.domain" $) -}} + {{- $brokers = concat $brokers (list (printf "%s:%d" $podName (int $.Values.listeners.kafka.port))) -}} + {{- $admin = concat $admin (list (printf "%s:%d" $podName (int $.Values.listeners.admin.port))) -}} + {{- end -}} +rpk: + # redpanda server configuration + overprovisioned: {{ dig "cpu" "overprovisioned" false .Values.resources }} + enable_memory_locking: {{ dig "memory" "enable_memory_locking" false .Values.resources }} + additional_start_flags: + - "--smp={{ include "redpanda-smp" . }}" + - "--memory={{ template "redpanda-memory" . }}M" + - "--reserve-memory={{ template "redpanda-reserve-memory" . 
}}M" + - "--default-log-level={{ .Values.logging.logLevel }}" + {{- with .Values.statefulset.additionalRedpandaCmdFlags -}} + {{- toYaml . | nindent 4 }} + {{- end }} + + {{- with dig "config" "rpk" dict .Values.AsMap }} + # config.rpk entries + {{- toYaml . | nindent 2 }} + {{- end }} + + {{- with dig "tuning" dict .Values.AsMap }} + # rpk tune entries + {{- toYaml . | nindent 2 }} + {{- end }} + + # kafka connection configuration + kafka_api: + brokers: {{ toYaml $brokers | nindent 6 }} + tls: + {{- if (include "kafka-internal-tls-enabled" . | fromJson).bool }} + {{- $cert := get .Values.tls.certs .Values.listeners.kafka.tls.cert }} + {{- if $cert.caEnabled }} + truststore_file: {{ printf "/etc/tls/certs/%s/ca.crt" .Values.listeners.kafka.tls.cert }} + {{- end }} + {{- if .Values.listeners.kafka.tls.requireClientAuth }} + cert_file: {{ printf "/etc/tls/certs/%s-client/tls.crt" (include "redpanda.fullname" .) }} + key_file: {{ printf "/etc/tls/certs/%s-client/tls.key" (include "redpanda.fullname" .) }} + {{- end }} + {{- end }} + admin_api: + addresses: {{ toYaml $admin | nindent 6 }} + tls: + {{- if (include "admin-internal-tls-enabled" . | fromJson).bool }} + {{- $cert := get .Values.tls.certs .Values.listeners.admin.tls.cert }} + {{- if $cert.caEnabled }} + truststore_file: {{ printf "/etc/tls/certs/%s/ca.crt" .Values.listeners.admin.tls.cert }} + {{- end }} + {{- if .Values.listeners.admin.tls.requireClientAuth }} + cert_file: {{ printf "/etc/tls/certs/%s-client/tls.crt" (include "redpanda.fullname" .) }} + key_file: {{ printf "/etc/tls/certs/%s-client/tls.key" (include "redpanda.fullname" .) }} + {{- end }} + {{- end }} {{- end -}} {{- define "configmap-server-list" -}} -{{- $root := . }} -{{- range (include "seed-server-list" $root | mustFromJson) }} -- host: - address: {{ . }} - port: {{ $root.Values.listeners.rpc.port }} -{{- end }} + {{- $serverList := list -}} + {{- range (include "seed-server-list" . 
| mustFromJson) -}} + {{- $server := dict "host" (dict "address" . "port" $.Values.listeners.rpc.port) -}} + {{- $serverList = append $serverList $server -}} + {{- end -}} + {{- toJson (dict "serverList" $serverList) -}} {{- end -}} -{{- define "configmap-with-server-list" -}} -{{- $root := . }} -{{- $serverList := (include "configmap-server-list" $root ) -}} -{{- $r := set $root "tempConfigMapServerList" ( $serverList ) }} -{{ include "configmap-content-no-seed" $r }} -{{- end -}} \ No newline at end of file +{{- define "full-configmap" -}} + {{- $serverList := (fromJson (include "configmap-server-list" .)).serverList -}} + {{- $r := set . "tempConfigMapServerList" $serverList -}} + {{ include "configmap-content-no-seed" $r | nindent 0 }} + {{ include "rpk-config-internal" $ | nindent 2 }} +{{- end -}} + +{{- define "rpk-config-external" -}} + {{- $brokers := list -}} + {{- $admin := list -}} + {{- $profile := keys .Values.listeners.kafka.external | first -}} + {{- $kafkaListener := get .Values.listeners.kafka.external $profile -}} + {{- $adminprofile := keys .Values.listeners.admin.external | first -}} + {{- $adminListener := get .Values.listeners.admin.external $adminprofile -}} + {{- range $i := until (.Values.statefulset.replicas|int) -}} + {{- $externalAdvertiseAddress := printf "%s-%d" (include "redpanda.fullname" $) $i -}} + {{- if (tpl ($.Values.external.domain | default "") $) -}} + {{- $externalAdvertiseAddress = printf "%s.%s" $externalAdvertiseAddress (tpl $.Values.external.domain $) -}} + {{- end -}} + {{- $tmplVals := dict "listenerVals" $.Values.listeners.kafka "externalVals" $kafkaListener "externalName" $profile "externalAdvertiseAddress" $externalAdvertiseAddress "values" $.Values "replicaIndex" $i -}} + {{- $port := int (include "advertised-port" $tmplVals) -}} + {{- $host := fromJson (include "advertised-host" (mustMerge $tmplVals (dict "port" $port) $)) -}} + {{- $brokers = concat $brokers (list (printf "%s:%d" (get $host "address") (get $host 
"port" | int))) -}} + {{- $tmplVals = dict "listenerVals" $.Values.listeners.admin "externalVals" $adminListener "externalName" $profile "externalAdvertiseAddress" $externalAdvertiseAddress "values" $.Values "replicaIndex" $i -}} + {{- $port = int (include "advertised-port" $tmplVals) -}} + {{- $host = fromJson (include "advertised-host" (mustMerge $tmplVals (dict "port" $port) $)) -}} + {{- $admin = concat $admin (list (printf "%s:%d" (get $host "address") (get $host "port" | int))) -}} + {{- end -}} +name: {{ $profile }} +kafka_api: + brokers: {{ toYaml $brokers | nindent 6 }} + tls: + {{- if (include "kafka-external-tls-enabled" (dict "Values" .Values "listener" $kafkaListener) | fromJson).bool }} + {{- $cert := get .Values.tls.certs .Values.listeners.kafka.tls.cert }} + {{- if $cert.caEnabled }} + ca_file: ca.crt + {{- end }} + {{- if .Values.listeners.kafka.tls.requireClientAuth }} + cert_file: tls.crt + key_file: tls.key + {{- end }} + {{- end }} +admin_api: + addresses: {{ toYaml $admin | nindent 6 }} + tls: + {{- if (include "admin-external-tls-enabled" (dict "Values" .Values "listener" $adminListener) | fromJson).bool }} + {{- $cert := get .Values.tls.certs .Values.listeners.admin.tls.cert }} + {{- if $cert.caEnabled }} + ca_file: ca.crt + {{- end }} + {{- if .Values.listeners.admin.tls.requireClientAuth }} + cert_file: tls.crt + key_file: tls.key + {{- end }} + {{- end }} +{{- end -}} diff --git a/charts/redpanda/redpanda/templates/_example-commands.tpl b/charts/redpanda/redpanda/templates/_example-commands.tpl index 8fda2c701..edc5af6b4 100644 --- a/charts/redpanda/redpanda/templates/_example-commands.tpl +++ b/charts/redpanda/redpanda/templates/_example-commands.tpl @@ -23,58 +23,30 @@ and tested in a test. {{/* tested in tests/test-kafka-sasl-status.yaml */}} {{- define "rpk-acl-user-create" -}} -{{ .rpk }} acl user create myuser --new-password changeme --mechanism {{ include "sasl-mechanism" . }} {{ include "rpk-acl-user-flags" . 
}} +{{ .rpk }} acl user create myuser --new-password changeme --mechanism {{ include "sasl-mechanism" . }} {{- end -}} {{/* tested in tests/test-kafka-sasl-status.yaml */}} {{- define "rpk-acl-create" -}} -{{- $dummySasl := .dummySasl -}} -{{- if $dummySasl -}} -{{ .rpk }} acl create --allow-principal 'myuser' --allow-host '*' --operation all --topic 'test-topic' {{ include "rpk-flags-no-admin-no-sasl" . }} {{ include "rpk-dummy-sasl" . }} -{{- else -}} -{{ .rpk }} acl create --allow-principal 'myuser' --allow-host '*' --operation all --topic 'test-topic' {{ include "rpk-flags-no-admin" . }} -{{- end -}} +{{ .rpk }} acl create --allow-principal 'myuser' --allow-host '*' --operation all --topic 'test-topic' {{- end -}} {{/* tested in tests/test-kafka-sasl-status.yaml */}} {{- define "rpk-cluster-info" -}} -{{- $dummySasl := .dummySasl -}} -{{- if $dummySasl -}} -{{ .rpk }} cluster info {{ include "rpk-flags-no-admin-no-sasl" . }} {{ include "rpk-dummy-sasl" . }} -{{- else -}} -{{ .rpk }} cluster info {{ include "rpk-flags-no-admin" . }} -{{- end -}} +{{ .rpk }} cluster info {{- end -}} {{/* tested in tests/test-kafka-sasl-status.yaml */}} {{- define "rpk-topic-create" -}} -{{- $flags := fromJson (include "rpk-flags" .) -}} -{{- $dummySasl := .dummySasl -}} -{{- if $dummySasl -}} -{{ .rpk }} topic create test-topic -p 3 -r {{ .Values.statefulset.replicas | int64 }} {{ include "rpk-flags-no-admin-no-sasl" . }} {{ include "rpk-dummy-sasl" . }} -{{- else -}} -{{ .rpk }} topic create test-topic -p 3 -r {{ .Values.statefulset.replicas | int64 }} {{ include "rpk-flags-no-admin" . }} -{{- end -}} +{{ .rpk }} topic create test-topic -p 3 -r {{ min (int64 .Values.statefulset.replicas) 3 }} {{- end -}} {{/* tested in tests/test-kafka-sasl-status.yaml */}} {{- define "rpk-topic-describe" -}} -{{- $flags := fromJson (include "rpk-flags" .) -}} -{{- $dummySasl := .dummySasl -}} -{{- if $dummySasl -}} -{{ .rpk }} topic describe test-topic {{ include "rpk-flags-no-admin-no-sasl" . 
}} {{ include "rpk-dummy-sasl" . }} -{{- else -}} -{{ .rpk }} topic describe test-topic {{ include "rpk-flags-no-admin" . }} -{{- end -}} +{{ .rpk }} topic describe test-topic {{- end -}} {{/* tested in tests/test-kafka-sasl-status.yaml */}} {{- define "rpk-topic-delete" -}} -{{- $flags := fromJson (include "rpk-flags" .) -}} -{{- $dummySasl := $.dummySasl -}} -{{- if $dummySasl -}} -{{ .rpk }} topic delete test-topic {{ include "rpk-flags-no-admin-no-sasl" . }} {{ include "rpk-dummy-sasl" . }} -{{- else -}} -{{ .rpk }} topic delete test-topic {{ include "rpk-flags-no-admin" . }} -{{- end -}} +{{ .rpk }} topic delete test-topic {{- end -}} diff --git a/charts/redpanda/redpanda/templates/_helpers.tpl b/charts/redpanda/redpanda/templates/_helpers.tpl index c56741098..9f846d2b6 100644 --- a/charts/redpanda/redpanda/templates/_helpers.tpl +++ b/charts/redpanda/redpanda/templates/_helpers.tpl @@ -99,10 +99,7 @@ Use AppVersion if image.tag is not set {{- $tag -}} {{- end -}} -{{/* -Generate configuration needed for rpk -*/}} - +{{/* Generate internal fqdn */}} {{- define "redpanda.internal.domain" -}} {{- $service := include "redpanda.servicename" . -}} {{- $ns := .Release.Namespace -}} @@ -382,112 +379,6 @@ than 1 core. {{- dig "sasl" "mechanism" "SCRAM-SHA-512" .Values.auth -}} {{- end -}} -{{- define "rpk-flags" -}} - {{- $root := . -}} - {{- $admin := list -}} - {{- $admin = concat $admin (list "--api-urls" (include "admin-api-urls" . )) -}} - {{- if (include "admin-internal-tls-enabled" . | fromJson).bool -}} - {{- $admin = concat $admin (list - "--admin-api-tls-enabled" - "--admin-api-tls-truststore" - (printf "/etc/tls/certs/%s/ca.crt" .Values.listeners.admin.tls.cert)) - -}} - {{- end -}} - {{- $kafka := list -}} - {{- if (include "kafka-internal-tls-enabled" . 
| fromJson).bool -}} - {{- $kafka = concat $kafka (list - "--tls-enabled" - "--tls-truststore" - (printf "/etc/tls/certs/%s/ca.crt" .Values.listeners.kafka.tls.cert)) - -}} - {{- end -}} - {{- $sasl := list -}} - {{- if (include "sasl-enabled" . | fromJson).bool -}} - {{- $sasl = concat $sasl (list - "--user" ( print "$(find /etc/secrets/users/* -print | sed -n 1p | xargs cat | sed -n 1p | tr ':' '\n' | sed -n 1p )" | quote ) - "--password" ( print "$(find /etc/secrets/users/* -print | sed -n 1p | xargs cat | sed -n 1p | tr ':' '\n' | sed -n 2p )" | quote ) - "--sasl-mechanism" ( printf "$(find /etc/secrets/users/* -print | sed -n 1p | xargs cat | sed -n 1p | tr ':' '\n' | sed -n 3p | grep . || echo %s )" (include "sasl-mechanism" .) | quote ) - ) - -}} - {{- end -}} - {{- $brokers := list -}} - {{- range $i := untilStep 0 (.Values.statefulset.replicas|int) 1 -}} - {{- $brokers = concat $brokers (list (printf "%s-%d.%s:%d" - (include "redpanda.fullname" $root) - $i - (include "redpanda.internal.domain" $root) - (int $root.Values.listeners.kafka.port))) - -}} - {{- end -}} - {{- $brokersFlag := printf "--brokers %s" (join "," $brokers) -}} -{{- toJson (dict "admin" (join " " $admin) "kafka" (join " " $kafka) "sasl" (join " " $sasl) "brokers" $brokersFlag) -}} -{{- end -}} - -{{- define "rpk-common-flags" -}} -{{- $flags := fromJson (include "rpk-flags" .) -}} -{{ join " " (list $flags.brokers $flags.admin $flags.sasl $flags.kafka)}} -{{- end -}} - -{{- define "rpk-flags-no-admin" -}} -{{- $flags := fromJson (include "rpk-flags" .) -}} -{{ join " " (list $flags.brokers $flags.kafka $flags.sasl)}} -{{- end -}} - -{{- define "rpk-flags-no-sasl" -}} -{{- $flags := fromJson (include "rpk-flags" .) -}} -{{ join " " (list $flags.brokers $flags.admin $flags.kafka)}} -{{- end -}} - -{{- define "rpk-flags-no-brokers-no-sasl" -}} -{{- $flags := fromJson (include "rpk-flags" .) -}} -{{ $flags.admin }} -{{- end -}} - -{{- define "rpk-acl-user-flags" }} -{{- $root := . 
-}} -{{- $admin := list -}} - {{- $apiUrls := list -}} - {{- range $i := untilStep 0 (.Values.statefulset.replicas|int) 1 -}} - {{- $apiUrls = concat $apiUrls (list (printf "%s-%d.%s:%d" - (include "redpanda.fullname" $root) - $i - (include "redpanda.internal.domain" $root) - (int $root.Values.listeners.admin.port))) - -}} - {{- end -}} - {{- $admin = concat $admin (list "--api-urls" (join "," $apiUrls)) -}} - {{- if (include "admin-internal-tls-enabled" . | fromJson).bool -}} - {{- $admin = concat $admin (list - "--admin-api-tls-enabled" - "--admin-api-tls-truststore" - (printf "/etc/tls/certs/%s/ca.crt" .Values.listeners.admin.tls.cert)) - -}} - {{- end -}} -{{ join " " $admin }} -{{- end -}} - -{{- define "rpk-flags-no-admin-no-sasl" -}} -{{- $flags := fromJson (include "rpk-flags" .) -}} -{{ join " " (list $flags.brokers $flags.kafka)}} -{{- end -}} - -{{- define "rpk-dummy-sasl" -}} -{{- if (include "sasl-enabled" . | fromJson).bool -}} -{{ "--user --password --sasl-mechanism " -}} -{{- else -}} -{{ "" }} -{{- end -}} -{{- end -}} - -{{- define "rpk-topic-flags" -}} -{{- $flags := fromJson (include "rpk-flags" .) -}} - {{- if (include "sasl-enabled" . | fromJson).bool -}} - {{- join " " (list $flags.brokers $flags.kafka $flags.sasl) -}} - {{- else -}} - {{- join " " (list $flags.brokers $flags.kafka) -}} - {{- end -}} -{{- end -}} - {{- define "storage-min-free-bytes" -}} {{- $fiveGiB := 5368709120 -}} {{- if dig "enabled" false .Values.storage.persistentVolume -}} @@ -498,23 +389,23 @@ than 1 core. {{- end -}} {{- define "tunable" -}} -{{- $tunable := dig "tunable" dict .Values.config -}} -{{- if (include "redpanda-atleast-22-3-0" . | fromJson).bool -}} -{{- range $key, $element := $tunable }} - {{- if or (eq (typeOf $element) "bool") $element }} - {{ $key }}: {{ $element | toYaml }} + {{- $tunable := dig "tunable" dict .Values.config -}} + {{- if (include "redpanda-atleast-22-3-0" . 
| fromJson).bool -}} + {{- range $key, $element := $tunable }} + {{- if or (eq (typeOf $element) "bool") $element }} +{{ $key }}: {{ $element | toYaml }} + {{- end }} {{- end }} -{{- end }} -{{- else if (include "redpanda-atleast-22-2-0" . | fromJson).bool -}} -{{- $tunable = unset $tunable "log_segment_size_min" -}} -{{- $tunable = unset $tunable "log_segment_size_max" -}} -{{- $tunable = unset $tunable "kafka_batch_max_bytes" -}} -{{- range $key, $element := $tunable }} - {{- if or (eq (typeOf $element) "bool") $element }} - {{ $key }}: {{ $element | toYaml }} + {{- else if (include "redpanda-atleast-22-2-0" . | fromJson).bool -}} + {{- $tunable = unset $tunable "log_segment_size_min" -}} + {{- $tunable = unset $tunable "log_segment_size_max" -}} + {{- $tunable = unset $tunable "kafka_batch_max_bytes" -}} + {{- range $key, $element := $tunable }} + {{- if or (eq (typeOf $element) "bool") $element }} +{{ $key }}: {{ $element | toYaml }} + {{- end }} {{- end }} -{{- end }} -{{- end -}} + {{- end -}} {{- end -}} {{- define "fail-on-insecure-sasl-logging" -}} @@ -548,6 +439,9 @@ than 1 core. {{- define "redpanda-22-2-atleast-22-2-10" -}} {{- toJson (dict "bool" (or (not (eq .Values.image.repository "docker.redpanda.com/redpandadata/redpanda")) (include "redpanda.semver" . | semverCompare ">=22.2.10-0,<22.3"))) -}} {{- end -}} +{{- define "redpanda-atleast-23-2-1" -}} +{{- toJson (dict "bool" (or (not (eq .Values.image.repository "docker.redpanda.com/redpandadata/redpanda")) (include "redpanda.semver" . | semverCompare ">=23.2.1-0 || <0.0.1-0"))) -}} +{{- end -}} {{- define "redpanda-22-2-x-without-sasl" -}} {{- $result := (include "redpanda-atleast-22-3-0" . 
| fromJson).bool -}} @@ -743,6 +637,8 @@ return licenseSecretRef.key checks deprecated values entry if current values emp - name: redpanda-{{ $name }}-cert mountPath: {{ printf "/etc/tls/certs/%s" $name }} {{- end }} +- name: mtls-client + mountPath: /etc/ls/certs/{{ template "redpanda.fullname" $ }}-client {{- end }} {{- end -}} @@ -763,6 +659,10 @@ return licenseSecretRef.key checks deprecated values entry if current values emp secretName: {{ template "cert-secret-name" $r }} defaultMode: 0o440 {{- end }} +- name: mtls-client + secret: + secretName: {{ template "redpanda.fullname" $ }}-client + defaultMode: 0o440 {{- end -}} {{- if and .Values.auth.sasl.enabled (not (empty .Values.auth.sasl.secretRef )) }} - name: users @@ -804,3 +704,17 @@ hostPath {{- define "storage-tiered-config" -}} {{- dig "tieredConfig" .Values.storage.tiered.config .Values.storage | toJson -}} {{- end -}} + +{{/* + rpk sasl environment variables + + this will return a string with the correct environment variables to use for SASL based on the + version of the redpada container being used +*/}} +{{- define "rpk-sasl-environment-variables" -}} +{{- if (include "redpanda-atleast-23-2-1" . | fromJson).bool -}} +RPK_USER RPK_PASS RPK_SASL_MECHANISM +{{- else -}} +REDPANDA_SASL_USERNAME REDPANDA_SASL_PASSWORD REDPANDA_SASL_MECHANISM +{{- end -}} +{{- end -}} diff --git a/charts/redpanda/redpanda/templates/certs.yaml b/charts/redpanda/redpanda/templates/certs.yaml index 015aa2162..674aa2e20 100644 --- a/charts/redpanda/redpanda/templates/certs.yaml +++ b/charts/redpanda/redpanda/templates/certs.yaml @@ -73,4 +73,32 @@ spec: {{- end }} {{- end }} {{- end }} -{{- end }} +--- + {{- $name := .Values.listeners.kafka.tls.cert }} + {{- $data := get .Values.tls.certs $name }} +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ template "redpanda.fullname" $ }}-client + namespace: {{ $ns | quote }} + {{- with include "full.labels" $root }} + labels: {{- . 
| nindent 4 }} + {{- end }} +spec: + commonName: {{ template "redpanda.fullname" $ }}-client + duration: {{ $data.duration | default "43800h" }} + isCA: false + secretName: {{ template "redpanda.fullname" $ }}-client + privateKey: + algorithm: ECDSA + size: 256 + {{- if not (empty $data.issuerRef) }} + issuerRef: {{- toYaml $data.issuerRef | nindent 4 }} + group: cert-manager.io + {{- else }} + issuerRef: + name: {{ template "redpanda.fullname" $ }}-{{ $name }}-root-issuer + kind: Issuer + group: cert-manager.io + {{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/redpanda/redpanda/templates/configmap.yaml b/charts/redpanda/redpanda/templates/configmap.yaml index e87531719..5f5704c9f 100644 --- a/charts/redpanda/redpanda/templates/configmap.yaml +++ b/charts/redpanda/redpanda/templates/configmap.yaml @@ -24,5 +24,19 @@ metadata: {{- with include "full.labels" . }} {{- . | nindent 4 }} {{- end }} +data: {{ include "full-configmap" . | nindent 2 }} + +{{- if .Values.external.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "redpanda.fullname" . }}-rpk + namespace: {{ .Release.Namespace | quote }} + labels: +{{- with include "full.labels" . }} + {{- . | nindent 4 }} +{{- end }} data: - {{ include "configmap-with-server-list" . | trim }} + profile: | {{ include "rpk-config-external" . | nindent 4 }} +{{- end }} \ No newline at end of file diff --git a/charts/redpanda/redpanda/templates/post-install-upgrade-job.yaml b/charts/redpanda/redpanda/templates/post-install-upgrade-job.yaml index 208365080..aac06a0b9 100644 --- a/charts/redpanda/redpanda/templates/post-install-upgrade-job.yaml +++ b/charts/redpanda/redpanda/templates/post-install-upgrade-job.yaml @@ -83,10 +83,10 @@ spec: - | set -e {{- if (include "redpanda-atleast-22-2-0" . | fromJson).bool }} - {{- if not (empty (include "enterprise-secret" . 
) ) }} - rpk cluster license set "$REDPANDA_LICENSE" {{ template "rpk-acl-user-flags" $ }} - {{- else if not ( empty (include "enterprise-license" . ) ) }} - rpk cluster license set {{ include "enterprise-license" . | quote }} {{ template "rpk-acl-user-flags" $ }} + {{- if not (empty (include "enterprise-secret" . )) }} + rpk cluster license set "$REDPANDA_LICENSE" + {{- else if not (empty (include "enterprise-license" . )) }} + rpk cluster license set {{ .Values.license_key | quote }} {{- end }} {{- end }} {{- with .Values.post_install_job.resources }} diff --git a/charts/redpanda/redpanda/templates/post-upgrade.yaml b/charts/redpanda/redpanda/templates/post-upgrade.yaml index 3be4244be..2816e9378 100644 --- a/charts/redpanda/redpanda/templates/post-upgrade.yaml +++ b/charts/redpanda/redpanda/templates/post-upgrade.yaml @@ -15,7 +15,6 @@ See the License for the specific language governing permissions and limitations under the License. */}} {{- if .Values.post_upgrade_job.enabled }} -{{- $rpkFlags := include "rpk-acl-user-flags" . }} {{- $sasl := .Values.auth.sasl }} {{- $root := deepCopy . }} apiVersion: batch/v1 @@ -70,14 +69,14 @@ spec: args: - | set -e - rpk cluster config import -f /etc/redpanda/bootstrap.yaml {{ $rpkFlags }} + rpk cluster config import -f /etc/redpanda/bootstrap.yaml {{- range $key, $value := .Values.config.cluster }} {{- if $value }} - rpk cluster config set {{ $key }} {{ $value }} {{ $rpkFlags }} + rpk cluster config set {{ $key }} {{ $value }} {{- end }} {{- end }} {{- if not (hasKey .Values.config.cluster "storage_min_free_bytes") }} - rpk cluster config set storage_min_free_bytes {{ include "storage-min-free-bytes" . }} {{ $rpkFlags }} + rpk cluster config set storage_min_free_bytes {{ include "storage-min-free-bytes" . 
}} {{- end }} {{- with .Values.post_upgrade_job.resources }} resources: diff --git a/charts/redpanda/redpanda/templates/secrets.yaml b/charts/redpanda/redpanda/templates/secrets.yaml index eb2fc034c..97ca27bf8 100644 --- a/charts/redpanda/redpanda/templates/secrets.yaml +++ b/charts/redpanda/redpanda/templates/secrets.yaml @@ -74,7 +74,7 @@ stringData: # Setup and export SASL bootstrap-user IFS=":" read -r USER_NAME PASSWORD MECHANISM < $(find /etc/secrets/users/* -print) MECHANISM=${MECHANISM:-{{- include "sasl-mechanism" . }}} - rpk acl user create ${USER_NAME} --password=${PASSWORD} --mechanism ${MECHANISM} {{ template "rpk-flags-no-brokers-no-sasl" $ }} || true + rpk acl user create ${USER_NAME} --password=${PASSWORD} --mechanism ${MECHANISM} || true {{- end }} touch /tmp/postStartHookFinished @@ -166,10 +166,17 @@ type: Opaque stringData: sasl-user.sh: |- #!/usr/bin/env bash + + trap 'error_handler $? $LINENO' ERR + + error_handler() { + echo "Error: ($1) occurred at line $2" + } + set -e echo "Waiting for cluster to be ready" - rpk cluster health {{ include "rpk-acl-user-flags" . }} --watch --exit-when-healthy + rpk cluster health --watch --exit-when-healthy {{- if and $sasl.enabled (not (empty $sasl.secretRef )) }} while true; do @@ -201,21 +208,21 @@ stringData: fi echo "Creating user ${USER_NAME}..." MECHANISM=${MECHANISM:-{{- include "sasl-mechanism" . }}} - creation_result=$(rpk acl user create ${USER_NAME} --password=${PASSWORD} --mechanism ${MECHANISM} {{ include "rpk-acl-user-flags" $ }} 2>&1) && creation_result_exit_code=$? || creation_result_exit_code=$? # On a non-success exit code + creation_result=$(rpk acl user create ${USER_NAME} --password=${PASSWORD} --mechanism ${MECHANISM} 2>&1) && creation_result_exit_code=$? || creation_result_exit_code=$? 
# On a non-success exit code if [[ $creation_result_exit_code -ne 0 ]]; then # Check if the stderr contains "User already exists" # this error occurs when password has changed if [[ $creation_result == *"User already exists"* ]]; then echo "Update user ${USER_NAME}" # we will try to update by first deleting - deletion_result=$(rpk acl user delete ${USER_NAME} {{ include "rpk-acl-user-flags" $ }} 2>&1) && deletion_result_exit_code=$? || deletion_result_exit_code=$? + deletion_result=$(rpk acl user delete ${USER_NAME} 2>&1) && deletion_result_exit_code=$? || deletion_result_exit_code=$? if [[ $deletion_result_exit_code -ne 0 ]]; then echo "deletion of user ${USER_NAME} failed: ${deletion_result}" READ_LIST_SUCCESS=1 break fi # Now we update the user - update_result=$(rpk acl user create ${USER_NAME} --password=${PASSWORD} --mechanism ${MECHANISM} {{ include "rpk-acl-user-flags" $ }} 2>&1) && update_result_exit_code=$? || update_result_exit_code=$? # On a non-success exit code + update_result=$(rpk acl user create ${USER_NAME} --password=${PASSWORD} --mechanism ${MECHANISM} 2>&1) && update_result_exit_code=$? || update_result_exit_code=$? # On a non-success exit code if [[ $update_result_exit_code -ne 0 ]]; then echo "updating user ${USER_NAME} failed: ${update_result}" READ_LIST_SUCCESS=1 @@ -239,7 +246,7 @@ stringData: if [[ -n "${USERS_LIST}" && ${READ_LIST_SUCCESS} ]]; then echo "Setting superusers configurations with users [${USERS_LIST}]" - superuser_result=$(rpk cluster config set superusers [${USERS_LIST}] {{ template "rpk-acl-user-flags" $ }} 2>&1) && superuser_result_exit_code=$? || superuser_result_exit_code=$? + superuser_result=$(rpk cluster config set superusers [${USERS_LIST}] 2>&1) && superuser_result_exit_code=$? || superuser_result_exit_code=$? 
if [[ $superuser_result_exit_code -ne 0 ]]; then echo "Setting superusers configurations failed: ${superuser_result}" else diff --git a/charts/redpanda/redpanda/templates/services.nodeport.yaml b/charts/redpanda/redpanda/templates/services.nodeport.yaml index 776e6e281..87084034a 100644 --- a/charts/redpanda/redpanda/templates/services.nodeport.yaml +++ b/charts/redpanda/redpanda/templates/services.nodeport.yaml @@ -41,7 +41,7 @@ spec: {{- if $enabled }} - name: admin-{{ $name }} protocol: TCP - port: {{ $values.listeners.admin.port }} + port: {{ $listener.port }} nodePort: {{ first (dig "advertisedPorts" (list $listener.port) $listener) }} {{- end }} {{- end }} diff --git a/charts/redpanda/redpanda/templates/statefulset.yaml b/charts/redpanda/redpanda/templates/statefulset.yaml index 4a2f8291c..3c6787e5d 100644 --- a/charts/redpanda/redpanda/templates/statefulset.yaml +++ b/charts/redpanda/redpanda/templates/statefulset.yaml @@ -224,8 +224,8 @@ spec: - -c - | set -x - rpk cluster health {{ (include "rpk-flags" . | fromJson).admin }} - rpk cluster health {{ (include "rpk-flags" . | fromJson).admin }} | grep 'Healthy:.*true' + rpk cluster health + rpk cluster health | grep 'Healthy:.*true' initialDelaySeconds: {{ .Values.statefulset.readinessProbe.initialDelaySeconds }} failureThreshold: {{ .Values.statefulset.readinessProbe.failureThreshold }} periodSeconds: {{ .Values.statefulset.readinessProbe.periodSeconds }} @@ -234,14 +234,7 @@ spec: - rpk - redpanda - start - - --smp={{ include "redpanda-smp" . }} - - --memory={{ template "redpanda-memory" . }}M - - --reserve-memory={{ template "redpanda-reserve-memory" . }}M - - --default-log-level={{ .Values.logging.logLevel }} - - --advertise-rpc-addr={{ $internalAdvertiseAddress }}:{{ .Values.listeners.rpc.port }} - {{- with .Values.statefulset.additionalRedpandaCmdFlags }} - {{- toYaml . 
| nindent 12 }} - {{- end }} + - "--advertise-rpc-addr={{ $internalAdvertiseAddress }}:{{ .Values.listeners.rpc.port }}" ports: {{- range $name, $listener := .Values.listeners }} - name: {{ lower $name }} @@ -294,12 +287,13 @@ spec: securityContext: {{- toYaml .Values.statefulset.sideCars.configWatcher.securityContext | nindent 12 }} {{- end }} volumeMounts: {{ include "common-mounts" . | nindent 12 }} + - name: config + mountPath: /etc/redpanda + - name: {{ template "redpanda.fullname" . }}-config-watcher + mountPath: /etc/secrets/config-watcher/scripts {{- if dig "sideCars" "configWatcher" "extraVolumeMounts" false .Values.statefulset -}} {{ tpl .Values.statefulset.sideCars.configWatcher.extraVolumeMounts . | nindent 12 }} {{- end }} - - name: {{ template "redpanda.fullname" . }}-config-watcher - mountPath: /etc/secrets/config-watcher/scripts - readOnly: true {{- end }} {{- if and .Values.rbac.enabled .Values.statefulset.sideCars.controllers.enabled }} - name: redpanda-controllers @@ -364,12 +358,10 @@ spec: - name: {{ (include "redpanda.name" .) | trunc 51 }}-configurator secret: secretName: {{ (include "redpanda.name" .) | trunc 51 }}-configurator - optional: false defaultMode: 0o775 - name: {{ template "redpanda.fullname" . }}-config-watcher secret: secretName: {{ template "redpanda.fullname" . 
}}-config-watcher - optional: false defaultMode: 0o775 {{- if or .Values.statefulset.nodeAffinity .Values.statefulset.podAffinity .Values.statefulset.podAntiAffinity }} affinity: diff --git a/charts/redpanda/redpanda/templates/tests/test-connector-via-console.yaml b/charts/redpanda/redpanda/templates/tests/test-connector-via-console.yaml index dd1404a9a..9493a3bc7 100644 --- a/charts/redpanda/redpanda/templates/tests/test-connector-via-console.yaml +++ b/charts/redpanda/redpanda/templates/tests/test-connector-via-console.yaml @@ -57,32 +57,38 @@ spec: - bash - -c - | - {{- $testTopic := printf "test-topic-%s" (randNumeric 3) }} - rpk topic create {{ $testTopic }} {{ include "rpk-topic-flags" . }} - rpk topic list {{ include "rpk-topic-flags" . }} - echo "Test message!" | rpk topic produce {{ $testTopic }} {{ include "rpk-topic-flags" . }} - - SASL_MECHANISM="PLAIN" {{- if .Values.auth.sasl.enabled }} set -e set +x - IFS=: read -r CONNECT_SASL_USERNAME KAFKA_SASL_PASSWORD CONNECT_SASL_MECHANISM < $(find /etc/secrets/users/* -print) - CONNECT_SASL_MECHANISM=${CONNECT_SASL_MECHANISM:-{{ .Values.auth.sasl.mechanism | upper }}} - if [[ -n "$CONNECT_SASL_USERNAME" && -n "$KAFKA_SASL_PASSWORD" && -n "$CONNECT_SASL_MECHANISM" ]]; then - SASL_MECHANISM=$CONNECT_SASL_MECHANISM - JAAS_CONFIG_SOURCE="\"source.cluster.sasl.jaas.config\": \"org.apache.kafka.common.security.scram.ScramLoginModule required username=\\\\"\"${CONNECT_SASL_USERNAME}\\\\"\" password=\\\\"\"${KAFKA_SASL_PASSWORD}\\\\"\";\"," - JAAS_CONFIG_TARGET="\"target.cluster.sasl.jaas.config\": \"org.apache.kafka.common.security.scram.ScramLoginModule required username=\\\\"\"${CONNECT_SASL_USERNAME}\\\\"\" password=\\\\"\"${KAFKA_SASL_PASSWORD}\\\\"\";\"," - fi + echo "SASL enabled: reading credentials from $(find /etc/secrets/users/* -print)" + IFS=: read -r {{ include "rpk-sasl-environment-variables" . }} < $(find /etc/secrets/users/* -print) + {{- if (include "redpanda-atleast-23-2-1" . 
| fromJson).bool }} + RPK_SASL_MECHANISM=${RPK_SASL_MECHANISM:-{{ .Values.auth.sasl.mechanism | upper }}} + {{- else }} + REDPANDA_SASL_MECHANISM=${REDPANDA_SASL_MECHANISM:-{{ .Values.auth.sasl.mechanism | upper }}} + RPK_USER="${REDPANDA_SASL_USERNAME}" + RPK_PASS="${REDPANDA_SASL_PASSWORD}" + RPK_SASL_MECHANISM="${REDPANDA_SASL_MECHANISM}" + {{- end }} + export {{ include "rpk-sasl-environment-variables" . }} + + JAAS_CONFIG_SOURCE="\"source.cluster.sasl.jaas.config\": \"org.apache.kafka.common.security.scram.ScramLoginModule required username=\\\\"\"${RPK_USER}\\\\"\" password=\\\\"\"${RPK_PASS}\\\\"\";\"," + JAAS_CONFIG_TARGET="\"target.cluster.sasl.jaas.config\": \"org.apache.kafka.common.security.scram.ScramLoginModule required username=\\\\"\"${RPK_USER}\\\\"\" password=\\\\"\"${RPK_PASS}\\\\"\";\"," + {{- end }} + + {{- $testTopic := printf "test-topic-%s" (randNumeric 3) }} + rpk topic create {{ $testTopic }} + rpk topic list + echo "Test message!" | rpk topic produce {{ $testTopic }} set -x set +e - {{- end }} SECURITY_PROTOCOL=PLAINTEXT - if [[ -n "$CONNECT_SASL_MECHANISM" && $TLS_ENABLED == "true" ]]; then + if [[ -n "$RPK_SASL_MECHANISM" && $TLS_ENABLED == "true" ]]; then SECURITY_PROTOCOL="SASL_SSL" - elif [[ -n "$CONNECT_SASL_MECHANISM" ]]; then + elif [[ -n "$RPK_SASL_MECHANISM" ]]; then SECURITY_PROTOCOL="SASL_PLAINTEXT" elif [[ $TLS_ENABLED == "true" ]]; then SECURITY_PROTOCOL="SSL" @@ -122,61 +128,37 @@ spec: EOF sed -i "s/CONNECTOR_NAME/$CONNECTOR_NAME/g" /tmp/mm2-conf.json - sed -i "s/SASL_MECHANISM/$SASL_MECHANISM/g" /tmp/mm2-conf.json + sed -i "s/SASL_MECHANISM/$RPK_SASL_MECHANISM/g" /tmp/mm2-conf.json sed -i "s/SECURITY_PROTOCOL/$SECURITY_PROTOCOL/g" /tmp/mm2-conf.json set +x sed -i "s/JAAS_CONFIG_SOURCE/$JAAS_CONFIG_SOURCE/g" /tmp/mm2-conf.json sed -i "s/JAAS_CONFIG_TARGET/$JAAS_CONFIG_TARGET/g" /tmp/mm2-conf.json set -x - max_iteration=10 - for i in $(seq 1 $max_iteration) - do - curl -v -H 'Content-Type: application/json' http://{{ 
include "console.fullname" $consoleValues }}:{{ include "console.containerPort" $consoleValues }}/api/kafka-connect/clusters/connectors/connectors \ - -d @/tmp/mm2-conf.json && echo - - result=$? - if [[ $result -eq 0 ]] - then - echo "Result successful" - break - else - echo "Result unsuccessful" - sleep 1 - fi - done - - if [[ $result -ne 0 ]] + URL=http://{{ include "console.fullname" $consoleValues }}:{{ include "console.containerPort" $consoleValues }}/api/kafka-connect/clusters/connectors/connectors + {{/* outputting to /dev/null because the output contains the user password */}} + echo "Creating mm2 connector" + if curl -svm3 --fail --retry 120 --retry-max-time 120 --retry-all-errors -H 'Content-Type: application/json' -o /dev/null "${URL}" -d @/tmp/mm2-conf.json then + echo "Result successful" + else echo "mm2 connector can not be created!!!" exit 1 fi - rpk topic consume source.{{ $testTopic }} -n 1 {{ include "rpk-topic-flags" . }} + rpk topic consume source.{{ $testTopic }} -n 1 - for i in $(seq 1 $max_iteration) - do - curl -v -X DELETE http://{{ include "console.fullname" $consoleValues }}:{{ include "console.containerPort" $consoleValues }}/api/kafka-connect/clusters/connectors/connectors/$CONNECTOR_NAME && echo - - result=$? - if [[ $result -eq 0 ]] - then - echo "Result successful" - break - else - echo "Result unsuccessful" - sleep 1 - fi - done - - if [[ $result -ne 0 ]] + echo "Destroying mm2 connector" + if curl -svm3 --fail --retry 120 --retry-max-time 120 --retry-all-errors -o /dev/null -X DELETE "${URL}/${CONNECTOR_NAME}" then + echo "Result successful" + else echo "mm2 connector can not be destroyed!!!" exit 1 fi - rpk topic list {{ include "rpk-topic-flags" . }} - rpk topic delete {{ $testTopic }} source.{{ $testTopic }} mm2-offset-syncs.test-only-redpanda.internal {{ include "rpk-topic-flags" . 
}} + rpk topic list + rpk topic delete {{ $testTopic }} source.{{ $testTopic }} mm2-offset-syncs.test-only-redpanda.internal volumeMounts: {{ include "default-mounts" . | nindent 8 }} securityContext: {{ include "container-security-context" . | nindent 8 }} volumes: {{ include "default-volumes" . | nindent 4 }} diff --git a/charts/redpanda/redpanda/templates/tests/test-kafka-produce-consume.yaml b/charts/redpanda/redpanda/templates/tests/test-kafka-produce-consume.yaml index ee6e8e410..b84ad7b92 100644 --- a/charts/redpanda/redpanda/templates/tests/test-kafka-produce-consume.yaml +++ b/charts/redpanda/redpanda/templates/tests/test-kafka-produce-consume.yaml @@ -34,7 +34,7 @@ spec: securityContext: {{ include "pod-security-context" . | nindent 4 }} {{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 4 }} - {{- end }} +{{- end }} containers: - name: {{ template "redpanda.name" . }} image: {{ .Values.image.repository }}:{{ template "redpanda.tag" . }} @@ -56,17 +56,27 @@ spec: {{- if and (include "is-licensed" . | fromJson).bool (include "storage-tiered-config" .|fromJson).cloud_storage_enabled }} {{- $cloudStorageFlags = "-c retention.bytes=80 -c segment.bytes=40 -c redpanda.remote.read=true -c redpanda.remote.write=true"}} {{- end }} -{{- if $sasl.enabled }} - until rpk topic create produce.consume.test.$POD_NAME {{ include "rpk-topic-flags" . }} {{ $cloudStorageFlags }} +{{- if .Values.auth.sasl.enabled }} + old_setting=${-//[^x]/} + set +x + IFS=: read -r {{ include "rpk-sasl-environment-variables" . }} < $(find /etc/secrets/users/* -print) + {{- if (include "redpanda-atleast-23-2-1" . | fromJson).bool }} + RPK_SASL_MECHANISM=${RPK_SASL_MECHANISM:-{{ .Values.auth.sasl.mechanism | upper }}} + {{- else }} + REDPANDA_SASL_MECHANISM=${REDPANDA_SASL_MECHANISM:-{{ .Values.auth.sasl.mechanism | upper }}} + {{- end }} + export {{ include "rpk-sasl-environment-variables" . 
}} + if [[ -n "$old_setting" ]]; then set -x; fi +{{- end }} + until rpk topic create produce.consume.test.$POD_NAME {{ $cloudStorageFlags }} do sleep 2 done {{- range $i := until 100 }} - echo "Pandas are awesome!" | rpk topic produce produce.consume.test.$POD_NAME {{ include "rpk-topic-flags" $ }} + echo "Pandas are awesome!" | rpk topic produce produce.consume.test.$POD_NAME {{- end }} sleep 2 - rpk topic consume produce.consume.test.$POD_NAME -n 1 {{ include "rpk-topic-flags" . }} | grep "Pandas are awesome!" - rpk topic delete produce.consume.test.$POD_NAME {{ include "rpk-topic-flags" . }} -{{- end }} + rpk topic consume produce.consume.test.$POD_NAME -n 1 | grep "Pandas are awesome!" + rpk topic delete produce.consume.test.$POD_NAME volumeMounts: {{ include "default-mounts" . | nindent 8 }} resources: {{ toYaml .Values.statefulset.resources | nindent 12 }} securityContext: {{ include "container-security-context" . | nindent 8 }} diff --git a/charts/redpanda/redpanda/templates/tests/test-kafka-sasl-status.yaml b/charts/redpanda/redpanda/templates/tests/test-kafka-sasl-status.yaml index 6c88c0eec..6bc37e7b5 100644 --- a/charts/redpanda/redpanda/templates/tests/test-kafka-sasl-status.yaml +++ b/charts/redpanda/redpanda/templates/tests/test-kafka-sasl-status.yaml @@ -15,8 +15,6 @@ See the License for the specific language governing permissions and limitations under the License. */}} {{- if (include "sasl-enabled" . | fromJson).bool }} -{{- $testTopicFlags := mustRegexReplaceAll "--user \\S+ " (include "rpk-topic-flags" . ) "--user myuser" }} -{{- $testTopicFlags := mustRegexReplaceAll "--password \\S+ " $testTopicFlags "--password changeme" }} {{- $rpk := deepCopy . }} {{- $sasl := .Values.auth.sasl }} {{- $_ := set $rpk "rpk" "rpk" }} @@ -49,7 +47,21 @@ spec: - -c - | set -xe - until rpk acl user delete myuser {{ include "rpk-acl-user-flags" . 
}} + +{{- if .Values.auth.sasl.enabled }} + old_setting=${-//[^x]/} + set +x + IFS=: read -r {{ include "rpk-sasl-environment-variables" . }} < $(find /etc/secrets/users/* -print) + {{- if (include "redpanda-atleast-23-2-1" . | fromJson).bool }} + RPK_SASL_MECHANISM=${RPK_SASL_MECHANISM:-{{ .Values.auth.sasl.mechanism | upper }}} + {{- else }} + REDPANDA_SASL_MECHANISM=${REDPANDA_SASL_MECHANISM:-{{ .Values.auth.sasl.mechanism | upper }}} + {{- end }} + export {{ include "rpk-sasl-environment-variables" . }} + if [[ -n "$old_setting" ]]; then set -x; fi +{{- end }} + + until rpk acl user delete myuser do sleep 2 done sleep 3 @@ -61,7 +73,7 @@ spec: {{ include "rpk-topic-create" $rpk }} {{ include "rpk-topic-describe" $rpk }} {{ include "rpk-topic-delete" $rpk }} - rpk acl user delete myuser {{ include "rpk-acl-user-flags" . }} + rpk acl user delete myuser volumeMounts: {{ include "default-mounts" . | nindent 8 }} resources: {{- toYaml .Values.statefulset.resources | nindent 12 }} diff --git a/charts/redpanda/redpanda/templates/tests/test-pandaproxy-internal-tls-status.yaml b/charts/redpanda/redpanda/templates/tests/test-pandaproxy-internal-tls-status.yaml index 0329753b9..b1873029d 100644 --- a/charts/redpanda/redpanda/templates/tests/test-pandaproxy-internal-tls-status.yaml +++ b/charts/redpanda/redpanda/templates/tests/test-pandaproxy-internal-tls-status.yaml @@ -43,14 +43,24 @@ spec: command: [ "/bin/bash", "-c" ] args: - | - {{- if $sasl.enabled }} - USERNAME=$(find /etc/secrets/users/* -print | sed -n 1p | xargs cat | sed -n 1p | tr ':' '\n' | sed -n 1p ) - PASSWORD=$(find /etc/secrets/users/* -print | sed -n 1p | xargs cat | sed -n 1p | tr ':' '\n' | sed -n 2p ) + {{- if .Values.auth.sasl.enabled }} + old_setting=${-//[^x]/} + set +x + IFS=: read -r {{ include "rpk-sasl-environment-variables" . }} < $(find /etc/secrets/users/* -print) + {{- if (include "redpanda-atleast-23-2-1" . 
| fromJson).bool }} + RPK_SASL_MECHANISM=${RPK_SASL_MECHANISM:-{{ .Values.auth.sasl.mechanism | upper }}} + {{- else }} + REDPANDA_SASL_MECHANISM=${REDPANDA_SASL_MECHANISM:-{{ .Values.auth.sasl.mechanism | upper }}} {{- end }} + export {{ include "rpk-sasl-environment-variables" . }} + RPK_USER="${RPK_USER:-${REDPANDA_SASL_USERNAME}}" + RPK_PASS="${RPK_PASS:-${REDPANDA_SASL_PASSWORD}}" + if [[ -n "$old_setting" ]]; then set -x; fi + {{- end }} curl -svm3 --fail --retry "120" --retry-max-time "120" --retry-all-errors --ssl-reqd \ {{- if or (include "sasl-enabled" .|fromJson).bool .Values.listeners.http.authenticationMethod }} - -u $USERNAME:$PASSWORD \ + -u ${RPK_USER}:${RPK_PASS} \ {{- end }} {{- if $cert.caEnabled }} --cacert /etc/tls/certs/{{ $service.tls.cert }}/ca.crt \ @@ -59,7 +69,7 @@ spec: curl -svm3 --fail --retry "120" --retry-max-time "120" --retry-all-errors --ssl-reqd \ {{- if or (include "sasl-enabled" .|fromJson).bool .Values.listeners.http.authenticationMethod }} - -u $USERNAME:$PASSWORD \ + -u ${RPK_USER}:${RPK_PASS} \ {{- end }} {{- if $cert.caEnabled }} --cacert /etc/tls/certs/{{ $service.tls.cert }}/ca.crt \ diff --git a/charts/redpanda/redpanda/templates/tests/test-pandaproxy-status.yaml b/charts/redpanda/redpanda/templates/tests/test-pandaproxy-status.yaml index de75cc29a..3797035b0 100644 --- a/charts/redpanda/redpanda/templates/tests/test-pandaproxy-status.yaml +++ b/charts/redpanda/redpanda/templates/tests/test-pandaproxy-status.yaml @@ -40,20 +40,30 @@ spec: command: [ "/bin/bash", "-c" ] args: - | - {{- if $sasl.enabled }} - USERNAME=$(find /etc/secrets/users/* -print | sed -n 1p | xargs cat | sed -n 1p | tr ':' '\n' | sed -n 1p ) - PASSWORD=$(find /etc/secrets/users/* -print | sed -n 1p | xargs cat | sed -n 1p | tr ':' '\n' | sed -n 2p ) + {{- if .Values.auth.sasl.enabled }} + old_setting=${-//[^x]/} + set +x + IFS=: read -r {{ include "rpk-sasl-environment-variables" . 
}} < $(find /etc/secrets/users/* -print) + {{- if (include "redpanda-atleast-23-2-1" . | fromJson).bool }} + RPK_SASL_MECHANISM=${RPK_SASL_MECHANISM:-{{ .Values.auth.sasl.mechanism | upper }}} + {{- else }} + REDPANDA_SASL_MECHANISM=${REDPANDA_SASL_MECHANISM:-{{ .Values.auth.sasl.mechanism | upper }}} {{- end }} + export {{ include "rpk-sasl-environment-variables" . }} + RPK_USER="${RPK_USER:-${REDPANDA_SASL_USERNAME}}" + RPK_PASS="${RPK_PASS:-${REDPANDA_SASL_PASSWORD}}" + if [[ -n "$old_setting" ]]; then set -x; fi + {{- end }} curl -svm3 --fail --retry "120" --retry-max-time "120" --retry-all-errors \ {{- if or (include "sasl-enabled" .|fromJson).bool .Values.listeners.http.authenticationMethod }} - -u $USERNAME:$PASSWORD \ + -u ${RPK_USER}:${RPK_PASS} \ {{- end }} http://{{ include "redpanda.servicename" . }}:{{ .Values.listeners.http.port }}/brokers curl -svm3 --fail --retry "120" --retry-max-time "120" --retry-all-errors \ {{- if or (include "sasl-enabled" .|fromJson).bool .Values.listeners.http.authenticationMethod }} - -u $USERNAME:$PASSWORD \ + -u ${RPK_USER}:${RPK_PASS} \ {{- end }} http://{{ include "redpanda.servicename" . }}:{{ .Values.listeners.http.port }}/topics volumeMounts: {{ include "default-mounts" . | nindent 8 }} diff --git a/charts/redpanda/redpanda/templates/tests/test-rack-awareness.yaml b/charts/redpanda/redpanda/templates/tests/test-rack-awareness.yaml index 4b6c56f0d..77903dacf 100644 --- a/charts/redpanda/redpanda/templates/tests/test-rack-awareness.yaml +++ b/charts/redpanda/redpanda/templates/tests/test-rack-awareness.yaml @@ -14,25 +14,23 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */}} -{{- $root := deepCopy . }} apiVersion: v1 kind: Pod metadata: name: {{ include "redpanda.fullname" . }}-test-rack-awareness namespace: {{ .Release.Namespace | quote }} - labels: - {{- with include "full.labels" . 
}} - {{- . | nindent 4 }} - {{- end }} +{{- with include "full.labels" . }} + labels: {{- . | nindent 4 }} +{{- end }} annotations: "helm.sh/hook": test "helm.sh/hook-delete-policy": before-hook-creation spec: restartPolicy: Never securityContext: {{ include "pod-security-context" . | nindent 4 }} - {{- with .Values.imagePullSecrets }} +{{- with .Values.imagePullSecrets }} imagePullSecrets: {{- toYaml . | nindent 4 }} - {{- end }} +{{- end }} containers: - name: {{ template "redpanda.name" . }} image: {{ .Values.image.repository }}:{{ template "redpanda.tag" . }} @@ -41,33 +39,22 @@ spec: - -c - | set -e - {{- if and .Values.rackAwareness.enabled (include "redpanda-atleast-22-3-0" . | fromJson).bool }} +{{- if and .Values.rackAwareness.enabled (include "redpanda-atleast-22-3-0" . | fromJson).bool }} curl --silent --fail --retry 120 \ --retry-max-time 120 --retry-all-errors \ - {{- if (include "tls-enabled" . | fromJson).bool }} - {{- range $name, $cert := .Values.tls.certs }} - {{- if and $cert.caEnabled (eq $name "default") }} - --cacert {{ printf "/etc/tls/certs/%s/ca.crt" $name }} \ - {{- end }} - {{- end }} - https://{{ include "redpanda.internal.domain" . }}:{{ .Values.listeners.admin.port }}/v1/node_config | grep '"rack":"rack[1-4]"' - {{- else }} - http://{{ include "redpanda.internal.domain" . }}:{{ .Values.listeners.admin.port }}/v1/node_config | grep '"rack":"rack[1-4]"' - {{- end }} + {{- if (include "tls-enabled" . | fromJson).bool }} + {{- if (dig "default" "caEnabled" false .Values.tls.certs) }} + --cacert "/etc/tls/certs/default/ca.crt" \ {{- end }} + https://{{ include "redpanda.internal.domain" . }}:{{ .Values.listeners.admin.port }}/v1/node_config | grep '"rack":"rack[1-4]"' + {{- else }} + http://{{ include "redpanda.internal.domain" . }}:{{ .Values.listeners.admin.port }}/v1/node_config | grep '"rack":"rack[1-4]"' + {{- end }} +{{- end }} - rpk redpanda admin config print \ - {{- if (include "tls-enabled" . 
| fromJson).bool }} - {{- range $name, $cert := .Values.tls.certs }} - {{- if and $cert.caEnabled (eq $name "default") }} - --admin-api-tls-enabled \ - --admin-api-tls-truststore {{ printf "/etc/tls/certs/%s/ca.crt" $name }} \ - {{- end }} - {{- end }} - {{- end }} - --host {{ include "redpanda.internal.domain" . }}:{{ .Values.listeners.admin.port }} | grep '"enable_rack_awareness": {{ .Values.rackAwareness.enabled }}' + rpk redpanda admin config print --host {{ include "redpanda.internal.domain" . }}:{{ .Values.listeners.admin.port }} | grep '"enable_rack_awareness": {{ .Values.rackAwareness.enabled }}' - rpk cluster config get enable_rack_awareness {{ template "rpk-acl-user-flags" $ }} | grep '{{ .Values.rackAwareness.enabled }}' + rpk cluster config get enable_rack_awareness volumeMounts: {{ include "default-mounts" . | nindent 8 }} securityContext: {{ include "container-security-context" . | nindent 8 }} volumes: {{ include "default-volumes" . | nindent 4 }} diff --git a/charts/redpanda/redpanda/templates/tests/test-rpk-debug-bundle.yaml b/charts/redpanda/redpanda/templates/tests/test-rpk-debug-bundle.yaml index e1a5eb6ec..2d3a86b49 100644 --- a/charts/redpanda/redpanda/templates/tests/test-rpk-debug-bundle.yaml +++ b/charts/redpanda/redpanda/templates/tests/test-rpk-debug-bundle.yaml @@ -65,7 +65,19 @@ spec: - -c - | set -e - rpk debug bundle -o /usr/share/redpanda/test/debug-test.zip -n {{ .Release.Namespace }} {{ include "rpk-common-flags" . }} + {{- if .Values.auth.sasl.enabled }} + old_setting=${-//[^x]/} + set +x + IFS=: read -r {{ include "rpk-sasl-environment-variables" . }} < $(find /etc/secrets/users/* -print) + {{- if (include "redpanda-atleast-23-2-1" . | fromJson).bool }} + RPK_SASL_MECHANISM=${RPK_SASL_MECHANISM:-{{ .Values.auth.sasl.mechanism | upper }}} + {{- else }} + REDPANDA_SASL_MECHANISM=${REDPANDA_SASL_MECHANISM:-{{ .Values.auth.sasl.mechanism | upper }}} + {{- end }} + export {{ include "rpk-sasl-environment-variables" . 
}} + if [[ -n "$old_setting" ]]; then set -x; fi + {{- end }} + rpk debug bundle -o /usr/share/redpanda/test/debug-test.zip -n {{ .Release.Namespace }} containers: - name: {{ template "redpanda.name" . }}-tester image: busybox:latest diff --git a/charts/redpanda/redpanda/templates/tests/test-sasl-updated.yaml b/charts/redpanda/redpanda/templates/tests/test-sasl-updated.yaml index c691f9187..8209397a0 100644 --- a/charts/redpanda/redpanda/templates/tests/test-sasl-updated.yaml +++ b/charts/redpanda/redpanda/templates/tests/test-sasl-updated.yaml @@ -47,11 +47,21 @@ spec: - bash - -c - | - set -xe + set -e + IFS=: read -r {{ include "rpk-sasl-environment-variables" . }} < $(find /etc/secrets/users/* -print) + {{- if (include "redpanda-atleast-23-2-1" . | fromJson).bool }} + RPK_SASL_MECHANISM=${RPK_SASL_MECHANISM:-{{ .Values.auth.sasl.mechanism | upper }}} + {{- else }} + REDPANDA_SASL_MECHANISM=${REDPANDA_SASL_MECHANISM:-{{ .Values.auth.sasl.mechanism | upper }}} + {{- end }} + export {{ include "rpk-sasl-environment-variables" . }} + + set -x + # check that the users list did update ready_result_exit_code=1 while [[ ${ready_result_exit_code} -ne 0 ]]; do - ready_result=$(rpk acl user list {{ include "rpk-acl-user-flags" . }} | grep anotheranotherme 2>&1) && ready_result_exit_code=$? + ready_result=$(rpk acl user list | grep anotheranotherme 2>&1) && ready_result_exit_code=$? 
sleep 2 done diff --git a/charts/redpanda/redpanda/templates/tests/test-schemaregistry-internal-tls-status.yaml b/charts/redpanda/redpanda/templates/tests/test-schemaregistry-internal-tls-status.yaml index 9eb192485..1e72b009c 100644 --- a/charts/redpanda/redpanda/templates/tests/test-schemaregistry-internal-tls-status.yaml +++ b/charts/redpanda/redpanda/templates/tests/test-schemaregistry-internal-tls-status.yaml @@ -43,93 +43,75 @@ spec: command: ["/bin/bash", "-c"] args: - | - {{- if $sasl.enabled }} - USERNAME=$(find /etc/secrets/users/* -print | sed -n 1p | xargs cat | sed -n 1p | tr ':' '\n' | sed -n 1p ) - PASSWORD=$(find /etc/secrets/users/* -print | sed -n 1p | xargs cat | sed -n 1p | tr ':' '\n' | sed -n 2p ) + {{- if .Values.auth.sasl.enabled }} + old_setting=${-//[^x]/} + set +x + IFS=: read -r {{ include "rpk-sasl-environment-variables" . }} < $(find /etc/secrets/users/* -print) + {{- if (include "redpanda-atleast-23-2-1" . | fromJson).bool }} + RPK_SASL_MECHANISM=${RPK_SASL_MECHANISM:-{{ .Values.auth.sasl.mechanism | upper }}} + {{- else }} + REDPANDA_SASL_MECHANISM=${REDPANDA_SASL_MECHANISM:-{{ .Values.auth.sasl.mechanism | upper }}} {{- end }} + export {{ include "rpk-sasl-environment-variables" . 
}} + RPK_USER="${RPK_USER:-${REDPANDA_SASL_USERNAME}}" + RPK_PASS="${RPK_PASS:-${REDPANDA_SASL_PASSWORD}}" + if [[ -n "$old_setting" ]]; then set -x; fi + {{- end }} + + set -ex schemaCurl () { - curl -svm3 --fail --retry "120" --retry-max-time "120" --retry-all-errors \ - {{- if or (include "sasl-enabled" .|fromJson).bool .Values.listeners.schemaRegistry.authenticationMethod }} - -u $USERNAME:$PASSWORD \ - {{- end }} - {{- if $cert.caEnabled }} - --cacert /etc/tls/certs/{{ $service.tls.cert }}/ca.crt \ - {{- end }} - $* + curl -svm3 --fail --retry "120" --retry-max-time "120" --retry-all-errors -o - \ + {{- if or (include "sasl-enabled" .|fromJson).bool .Values.listeners.schemaRegistry.authenticationMethod }} + -u "${RPK_USER}:${RPK_PASS}" \ + {{- end }} + {{- if $cert.caEnabled }} + --cacert /etc/tls/certs/{{ $service.tls.cert }}/ca.crt \ + {{- end }} + $* } + echo "Get existng schemas" schemaCurl https://{{ include "redpanda.internal.domain" . }}:{{ .Values.listeners.schemaRegistry.port }}/subjects - curl -svm3 --fail --retry "120" --retry-max-time "120" --retry-all-errors \ - -X POST -H 'Content-Type:application/vnd.schemaregistry.v1+json' \ - -d '{"schema": "{\"type\":\"record\",\"name\":\"sensor_sample\",\"fields\":[{\"name\":\"timestamp\",\"type\":\"long\",\"logicalType\":\"timestamp-millis\"},{\"name\":\"identifier\",\"type\":\"string\",\"logicalType\":\"uuid\"},{\"name\":\"value\",\"type\":\"long\"}]}"}' \ - {{- if or (include "sasl-enabled" .|fromJson).bool .Values.listeners.schemaRegistry.authenticationMethod }} - -u $USERNAME:$PASSWORD \ - {{- end }} - {{- if $cert.caEnabled }} - --cacert /etc/tls/certs/{{ $service.tls.cert }}/ca.crt \ - {{- end }} - https://{{ include "redpanda.internal.domain" . 
}}:{{ .Values.listeners.schemaRegistry.port }}/subjects/sensor-value/versions + echo "Create schema" + curl -svm3 --fail --retry "120" --retry-max-time "120" --retry-all-errors -o - \ + -X POST -H 'Content-Type:application/vnd.schemaregistry.v1+json' \ + -d '{"schema": "{\"type\":\"record\",\"name\":\"sensor_sample\",\"fields\":[{\"name\":\"timestamp\",\"type\":\"long\",\"logicalType\":\"timestamp-millis\"},{\"name\":\"identifier\",\"type\":\"string\",\"logicalType\":\"uuid\"},{\"name\":\"value\",\"type\":\"long\"}]}"}' \ + {{- if or (include "sasl-enabled" .|fromJson).bool .Values.listeners.schemaRegistry.authenticationMethod }} + -u ${RPK_USER}:${RPK_PASS} \ + {{- end }} + {{- if $cert.caEnabled }} + --cacert /etc/tls/certs/{{ $service.tls.cert }}/ca.crt \ + {{- end }} + https://{{ include "redpanda.internal.domain" . }}:{{ .Values.listeners.schemaRegistry.port }}/subjects/sensor-value/versions + echo "Get schema 1" schemaCurl https://{{ include "redpanda.internal.domain" . }}:{{ .Values.listeners.schemaRegistry.port }}/schemas/ids/1 + echo "Get existng schemas" schemaCurl https://{{ include "redpanda.internal.domain" . }}:{{ .Values.listeners.schemaRegistry.port }}/subjects - max_iteration=10 - - for i in $(seq 1 $max_iteration) - do - curl -vv -X DELETE \ - {{- if or (include "sasl-enabled" .|fromJson).bool .Values.listeners.schemaRegistry.authenticationMethod }} - -u $USERNAME:$PASSWORD \ - {{- end }} - {{- if $cert.caEnabled }} + echo "Delete schema 1" + curl -svm3 --fail --retry "120" --retry-max-time "120" --retry-all-errors -X DELETE -o - \ + {{- if or (include "sasl-enabled" .|fromJson).bool .Values.listeners.schemaRegistry.authenticationMethod }} + -u ${RPK_USER}:${RPK_PASS} \ + {{- end }} + {{- if $cert.caEnabled }} --cacert /etc/tls/certs/{{ $service.tls.cert }}/ca.crt \ - {{- end }} + {{- end }} https://{{ include "redpanda.internal.domain" . }}:{{ .Values.listeners.schemaRegistry.port }}/subjects/sensor-value/versions/1 - result=$? 
- if [[ $result -eq 0 ]] - then - echo "Result successful" - break - else - echo "Result unsuccessful" - sleep 1 - fi - done - if [[ $result -ne 0 ]] - then - echo "All of the trials failed to delete schema!!!" - fi - - for i in $(seq 1 $max_iteration) - do - curl -vv -X DELETE \ - {{- if or (include "sasl-enabled" .|fromJson).bool .Values.listeners.schemaRegistry.authenticationMethod }} - -u $USERNAME:$PASSWORD \ - {{- end }} - {{- if $cert.caEnabled }} + echo "Delete schema 1 permanently" + curl -svm3 --fail --retry "120" --retry-max-time "120" --retry-all-errors -X DELETE -o - \ + {{- if or (include "sasl-enabled" .|fromJson).bool .Values.listeners.schemaRegistry.authenticationMethod }} + -u ${RPK_USER}:${RPK_PASS} \ + {{- end }} + {{- if $cert.caEnabled }} --cacert /etc/tls/certs/{{ $service.tls.cert }}/ca.crt \ - {{- end }} + {{- end }} https://{{ include "redpanda.internal.domain" . }}:{{ .Values.listeners.schemaRegistry.port }}/subjects/sensor-value/versions/1?permanent=true - result=$? - if [[ $result -eq 0 ]] - then - echo "Result successful" - break - else - echo "Result unsuccessful" - sleep 1 - fi - done - if [[ $result -ne 0 ]] - then - echo "All of the trials failed to permanently delete schema!!!" - exit 1 - fi volumeMounts: {{ include "default-mounts" . | nindent 8 }} resources: {{ toYaml .Values.statefulset.resources | nindent 12 }} securityContext: {{ include "container-security-context" . 
| nindent 8 }} diff --git a/charts/redpanda/redpanda/templates/tests/test-schemaregistry-status.yaml b/charts/redpanda/redpanda/templates/tests/test-schemaregistry-status.yaml index 7f98cb4ed..5d7ee92e9 100644 --- a/charts/redpanda/redpanda/templates/tests/test-schemaregistry-status.yaml +++ b/charts/redpanda/redpanda/templates/tests/test-schemaregistry-status.yaml @@ -40,15 +40,25 @@ spec: command: [ "/bin/bash", "-c" ] args: - | - {{- if $sasl.enabled }} - USERNAME=$(find /etc/secrets/users/* -print | sed -n 1p | xargs cat | sed -n 1p | tr ':' '\n' | sed -n 1p ) - PASSWORD=$(find /etc/secrets/users/* -print | sed -n 1p | xargs cat | sed -n 1p | tr ':' '\n' | sed -n 2p ) + {{- if .Values.auth.sasl.enabled }} + old_setting=${-//[^x]/} + set +x + IFS=: read -r {{ include "rpk-sasl-environment-variables" . }} < $(find /etc/secrets/users/* -print) + {{- if (include "redpanda-atleast-23-2-1" . | fromJson).bool }} + RPK_SASL_MECHANISM=${RPK_SASL_MECHANISM:-{{ .Values.auth.sasl.mechanism | upper }}} + {{- else }} + REDPANDA_SASL_MECHANISM=${REDPANDA_SASL_MECHANISM:-{{ .Values.auth.sasl.mechanism | upper }}} {{- end }} + export {{ include "rpk-sasl-environment-variables" . 
}} + RPK_USER="${RPK_USER:-${REDPANDA_SASL_USERNAME}}" + RPK_PASS="${RPK_PASS:-${REDPANDA_SASL_PASSWORD}}" + if [[ -n "$old_setting" ]]; then set -x; fi + {{- end }} schemaCurl () { curl -svm3 --fail --retry "120" --retry-max-time "120" --retry-all-errors \ {{- if or (include "sasl-enabled" .|fromJson).bool .Values.listeners.schemaRegistry.authenticationMethod }} - -u $USERNAME:$PASSWORD \ + -u ${RPK_USER}:${RPK_PASS} \ {{- end }} $* } @@ -59,7 +69,7 @@ spec: -X POST -H 'Content-Type:application/vnd.schemaregistry.v1+json' \ -d '{"schema":"{\"type\":\"record\",\"name\":\"sensor_sample\",\"fields\":[{\"name\":\"timestamp\",\"type\":\"long\",\"logicalType\":\"timestamp-millis\"},{\"name\":\"identifier\",\"type\":\"string\",\"logicalType\":\"uuid\"},{\"name\":\"value\",\"type\":\"long\"}]}"}' \ {{- if or (include "sasl-enabled" .|fromJson).bool .Values.listeners.schemaRegistry.authenticationMethod }} - -u $USERNAME:$PASSWORD \ + -u ${RPK_USER}:${RPK_PASS} \ {{- end }} http://{{ include "redpanda.internal.domain" . }}:{{ .Values.listeners.schemaRegistry.port }}/subjects/sensor-value/versions @@ -73,7 +83,7 @@ spec: do curl -vv -X DELETE \ {{- if or (include "sasl-enabled" .|fromJson).bool .Values.listeners.schemaRegistry.authenticationMethod }} - -u $USERNAME:$PASSWORD \ + -u ${RPK_USER}:${RPK_PASS} \ {{- end }} http://{{ include "redpanda.internal.domain" . }}:{{ .Values.listeners.schemaRegistry.port }}/subjects/sensor-value/versions/1 result=$? @@ -96,7 +106,7 @@ spec: do curl -vv -X DELETE \ {{- if or (include "sasl-enabled" .|fromJson).bool .Values.listeners.schemaRegistry.authenticationMethod }} - -u $USERNAME:$PASSWORD \ + -u ${RPK_USER}:${RPK_PASS} \ {{- end }} http://{{ include "redpanda.internal.domain" . }}:{{ .Values.listeners.schemaRegistry.port }}/subjects/sensor-value/versions/1?permanent=true result=$? 
diff --git a/charts/stackstate/stackstate-k8s-agent/.helmignore b/charts/stackstate/stackstate-k8s-agent/.helmignore new file mode 100644 index 000000000..15a5c1277 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/.helmignore @@ -0,0 +1,26 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ +linter_values.yaml +ci/ +installation/ +logo.svg diff --git a/charts/stackstate/stackstate-k8s-agent/Chart.lock b/charts/stackstate/stackstate-k8s-agent/Chart.lock new file mode 100644 index 000000000..eb882a083 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: http-header-injector + repository: https://helm.stackstate.io + version: 0.0.6 +digest: sha256:eec4d022d97ef52e88860b54682692fd369c864ca49ccde01b30605cce20c96f +generated: "2023-08-25T14:49:57.569449+02:00" diff --git a/charts/stackstate/stackstate-k8s-agent/Chart.yaml b/charts/stackstate/stackstate-k8s-agent/Chart.yaml new file mode 100644 index 000000000..3f1874084 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/Chart.yaml @@ -0,0 +1,24 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: StackState Agent + catalog.cattle.io/kube-version: '>=1.19.0-0' + catalog.cattle.io/release-name: stackstate-k8s-agent +apiVersion: v2 +appVersion: 2.19.1 +dependencies: +- alias: httpHeaderInjectorWebhook + name: http-header-injector + repository: file://./charts/http-header-injector + version: 0.0.6 +description: Helm chart for the StackState Agent. 
+home: https://github.com/StackVista/stackstate-agent +icon: https://raw.githubusercontent.com/StackVista/helm-charts/master/stable/stackstate-k8s-agent/logo.svg +keywords: +- monitoring +- observability +- stackstate +maintainers: +- email: ops@stackstate.com + name: Stackstate +name: stackstate-k8s-agent +version: 1.0.49 diff --git a/charts/stackstate/stackstate-k8s-agent/README.md b/charts/stackstate/stackstate-k8s-agent/README.md new file mode 100644 index 000000000..714263c4e --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/README.md @@ -0,0 +1,235 @@ +# stackstate-k8s-agent + +Helm chart for the StackState Agent. + +Current chart version is `1.0.49` + +**Homepage:** + +## Requirements + +| Repository | Name | Version | +|------------|------|---------| +| https://helm.stackstate.io | httpHeaderInjectorWebhook(http-header-injector) | 0.0.6 | + +## Required Values + +In order to successfully install this chart, you **must** provide the following variables: + +* `stackstate.apiKey` +* `stackstate.cluster.name` +* `stackstate.url` + +The parameter `stackstate.cluster.name` is entered when installing the Cluster Agent StackPack. + +Install them on the command line on Helm with the following command: + +```shell +helm install \ +--set-string 'stackstate.apiKey'='' \ +--set-string 'stackstate.cluster.name'='' \ +--set-string 'stackstate.url'='' \ +stackstate/stackstate-k8s-agent +``` + +## Recommended Values + +It is also recommended that you set a value for `stackstate.cluster.authToken`. If it is not provided, a value will be generated for you, but the value will change each time an upgrade is performed. 
+ +The command for **also** installing with a set token would be: + +```shell +helm install \ +--set-string 'stackstate.apiKey'='' \ +--set-string 'stackstate.cluster.name'='' \ +--set-string 'stackstate.cluster.authToken'='' \ +--set-string 'stackstate.url'='' \ +stackstate/stackstate-k8s-agent +``` + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| all.hardening.enabled | bool | `false` | An indication of whether the containers will be evaluated for hardening at runtime | +| all.image.registry | string | `"quay.io"` | The image registry to use. | +| checksAgent.affinity | object | `{}` | Affinity settings for pod assignment. | +| checksAgent.apm.enabled | bool | `true` | Enable / disable the agent APM module. | +| checksAgent.checksTagCardinality | string | `"orchestrator"` | | +| checksAgent.config | object | `{"override":[]}` | | +| checksAgent.config.override | list | `[]` | A list of objects containing three keys `name`, `path` and `data`, specifying filenames at specific paths which need to be (potentially) overridden using a mounted configmap | +| checksAgent.enabled | bool | `true` | Enable / disable runnning cluster checks in a separately deployed pod | +| checksAgent.image.pullPolicy | string | `"IfNotPresent"` | Default container image pull policy. | +| checksAgent.image.repository | string | `"stackstate/stackstate-k8s-agent"` | Base container image repository. | +| checksAgent.image.tag | string | `"e36d1c88"` | Default container image tag. | +| checksAgent.livenessProbe.enabled | bool | `true` | Enable use of livenessProbe check. | +| checksAgent.livenessProbe.failureThreshold | int | `3` | `failureThreshold` for the liveness probe. | +| checksAgent.livenessProbe.initialDelaySeconds | int | `15` | `initialDelaySeconds` for the liveness probe. | +| checksAgent.livenessProbe.periodSeconds | int | `15` | `periodSeconds` for the liveness probe. 
| +| checksAgent.livenessProbe.successThreshold | int | `1` | `successThreshold` for the liveness probe. | +| checksAgent.livenessProbe.timeoutSeconds | int | `5` | `timeoutSeconds` for the liveness probe. | +| checksAgent.logLevel | string | `"INFO"` | Logging level for clusterchecks agent processes. | +| checksAgent.networkTracing.enabled | bool | `true` | Enable / disable the agent network tracing module. | +| checksAgent.nodeSelector | object | `{}` | Node labels for pod assignment. | +| checksAgent.priorityClassName | string | `""` | Priority class for clusterchecks agent pods. | +| checksAgent.processAgent.enabled | bool | `true` | Enable / disable the agent process agent module. | +| checksAgent.readinessProbe.enabled | bool | `true` | Enable use of readinessProbe check. | +| checksAgent.readinessProbe.failureThreshold | int | `3` | `failureThreshold` for the readiness probe. | +| checksAgent.readinessProbe.initialDelaySeconds | int | `15` | `initialDelaySeconds` for the readiness probe. | +| checksAgent.readinessProbe.periodSeconds | int | `15` | `periodSeconds` for the readiness probe. | +| checksAgent.readinessProbe.successThreshold | int | `1` | `successThreshold` for the readiness probe. | +| checksAgent.readinessProbe.timeoutSeconds | int | `5` | `timeoutSeconds` for the readiness probe. | +| checksAgent.replicas | int | `1` | Number of clusterchecks agent pods to schedule | +| checksAgent.resources.limits.cpu | string | `"400m"` | Memory resource limits. | +| checksAgent.resources.limits.memory | string | `"600Mi"` | | +| checksAgent.resources.requests.cpu | string | `"20m"` | Memory resource requests. 
| +| checksAgent.resources.requests.memory | string | `"512Mi"` | | +| checksAgent.scc.enabled | bool | `false` | Enable / disable the installation of the SecurityContextConfiguration needed for installation on OpenShift | +| checksAgent.serviceaccount.annotations | object | `{}` | Annotations for the service account for the cluster checks pods | +| checksAgent.skipSslValidation | bool | `false` | Set to true if self signed certificates are used. | +| checksAgent.strategy | object | `{"type":"RollingUpdate"}` | The strategy for the Deployment object. | +| checksAgent.tolerations | list | `[]` | Toleration labels for pod assignment. | +| clusterAgent.affinity | object | `{}` | Affinity settings for pod assignment. | +| clusterAgent.collection.kubeStateMetrics.annotationsAsTags | object | `{}` | Extra annotations to collect from resources and to turn into StackState tag. | +| clusterAgent.collection.kubeStateMetrics.clusterCheck | bool | `false` | For large clusters where the Kubernetes State Metrics Check Core needs to be distributed on dedicated workers. | +| clusterAgent.collection.kubeStateMetrics.enabled | bool | `true` | Enable / disable the cluster agent kube-state-metrics collection. | +| clusterAgent.collection.kubeStateMetrics.labelsAsTags | object | `{}` | Extra labels to collect from resources and to turn into StackState tag. # It has the following structure: # labelsAsTags: # : # can be pod, deployment, node, etc. # : # where is the kubernetes label and is the StackState tag # : # : # : # # Warning: the label must match the transformation done by kube-state-metrics, # for example tags.stackstate/version becomes tags_stackstate_version. | +| clusterAgent.collection.kubernetesEvents | bool | `true` | Enable / disable the cluster agent events collection. | +| clusterAgent.collection.kubernetesMetrics | bool | `true` | Enable / disable the cluster agent metrics collection. 
| +| clusterAgent.collection.kubernetesResources.configmaps | bool | `true` | Enable / disable collection of ConfigMaps. | +| clusterAgent.collection.kubernetesResources.cronjobs | bool | `true` | Enable / disable collection of CronJobs. | +| clusterAgent.collection.kubernetesResources.daemonsets | bool | `true` | Enable / disable collection of DaemonSets. | +| clusterAgent.collection.kubernetesResources.deployments | bool | `true` | Enable / disable collection of Deployments. | +| clusterAgent.collection.kubernetesResources.endpoints | bool | `true` | Enable / disable collection of Endpoints. If endpoints are disabled then StackState won't be able to connect a Service to Pods that serving it | +| clusterAgent.collection.kubernetesResources.ingresses | bool | `true` | Enable / disable collection of Ingresses. | +| clusterAgent.collection.kubernetesResources.jobs | bool | `true` | Enable / disable collection of Jobs. | +| clusterAgent.collection.kubernetesResources.namespaces | bool | `true` | Enable / disable collection of Namespaces. | +| clusterAgent.collection.kubernetesResources.persistentvolumeclaims | bool | `true` | Enable / disable collection of PersistentVolumeClaims. Disabling these will not let StackState connect PersistentVolumes to pods they are attached to | +| clusterAgent.collection.kubernetesResources.persistentvolumes | bool | `true` | Enable / disable collection of PersistentVolumes. | +| clusterAgent.collection.kubernetesResources.replicasets | bool | `true` | Enable / disable collection of ReplicaSets. | +| clusterAgent.collection.kubernetesResources.resourcequotas | bool | `true` | Enable / disable collection of ResourceQuotas. | +| clusterAgent.collection.kubernetesResources.secrets | bool | `true` | Enable / disable collection of Secrets. | +| clusterAgent.collection.kubernetesResources.statefulsets | bool | `true` | Enable / disable collection of StatefulSets. 
| +| clusterAgent.collection.kubernetesResources.volumeattachments | bool | `true` | Enable / disable collection of Volume Attachments. Used to bind Nodes to Persistent Volumes. | +| clusterAgent.collection.kubernetesTimeout | int | `10` | Default timeout (in seconds) when obtaining information from the Kubernetes API. | +| clusterAgent.collection.kubernetesTopology | bool | `true` | Enable / disable the cluster agent topology collection. | +| clusterAgent.config | object | `{"configMap":{"maxDataSize":null},"events":{"categories":{}},"override":[],"topology":{"collectionInterval":90}}` | | +| clusterAgent.config.configMap.maxDataSize | string | `nil` | Maximum amount of characters for the data property of a ConfigMap collected by the kubernetes topology check | +| clusterAgent.config.events.categories | object | `{}` | Custom mapping from Kubernetes event reason to StackState event category. Categories allowed: Alerts, Activities, Changes, Others | +| clusterAgent.config.override | list | `[]` | A list of objects containing three keys `name`, `path` and `data`, specifying filenames at specific paths which need to be (potentially) overridden using a mounted configmap | +| clusterAgent.config.topology.collectionInterval | int | `90` | Interval for running topology collection, in seconds | +| clusterAgent.enabled | bool | `true` | Enable / disable the cluster agent. | +| clusterAgent.image.pullPolicy | string | `"IfNotPresent"` | Default container image pull policy. | +| clusterAgent.image.repository | string | `"stackstate/stackstate-k8s-cluster-agent"` | Base container image repository. | +| clusterAgent.image.tag | string | `"e36d1c88"` | Default container image tag. | +| clusterAgent.livenessProbe.enabled | bool | `true` | Enable use of livenessProbe check. | +| clusterAgent.livenessProbe.failureThreshold | int | `3` | `failureThreshold` for the liveness probe. 
| +| clusterAgent.livenessProbe.initialDelaySeconds | int | `15` | `initialDelaySeconds` for the liveness probe. | +| clusterAgent.livenessProbe.periodSeconds | int | `15` | `periodSeconds` for the liveness probe. | +| clusterAgent.livenessProbe.successThreshold | int | `1` | `successThreshold` for the liveness probe. | +| clusterAgent.livenessProbe.timeoutSeconds | int | `5` | `timeoutSeconds` for the liveness probe. | +| clusterAgent.logLevel | string | `"INFO"` | Logging level for stackstate-k8s-agent processes. | +| clusterAgent.nodeSelector | object | `{}` | Node labels for pod assignment. | +| clusterAgent.priorityClassName | string | `""` | Priority class for stackstate-k8s-agent pods. | +| clusterAgent.readinessProbe.enabled | bool | `true` | Enable use of readinessProbe check. | +| clusterAgent.readinessProbe.failureThreshold | int | `3` | `failureThreshold` for the readiness probe. | +| clusterAgent.readinessProbe.initialDelaySeconds | int | `15` | `initialDelaySeconds` for the readiness probe. | +| clusterAgent.readinessProbe.periodSeconds | int | `15` | `periodSeconds` for the readiness probe. | +| clusterAgent.readinessProbe.successThreshold | int | `1` | `successThreshold` for the readiness probe. | +| clusterAgent.readinessProbe.timeoutSeconds | int | `5` | `timeoutSeconds` for the readiness probe. | +| clusterAgent.replicaCount | int | `1` | Number of replicas of the cluster agent to deploy. | +| clusterAgent.resources.limits.cpu | string | `"400m"` | CPU resource limits. | +| clusterAgent.resources.limits.memory | string | `"800Mi"` | Memory resource limits. | +| clusterAgent.resources.requests.cpu | string | `"70m"` | CPU resource requests. | +| clusterAgent.resources.requests.memory | string | `"512Mi"` | Memory resource requests. 
| +| clusterAgent.service.port | int | `5005` | Change the Cluster Agent service port | +| clusterAgent.service.targetPort | int | `5005` | Change the Cluster Agent service targetPort | +| clusterAgent.serviceaccount.annotations | object | `{}` | Annotations for the service account for the cluster agent pods | +| clusterAgent.strategy | object | `{"type":"RollingUpdate"}` | The strategy for the Deployment object. | +| clusterAgent.tolerations | list | `[]` | Toleration labels for pod assignment. | +| fullnameOverride | string | `""` | Override the fullname of the chart. | +| global.extraEnv.open | object | `{}` | Extra open environment variables to inject into pods. | +| global.extraEnv.secret | object | `{}` | Extra secret environment variables to inject into pods via a `Secret` object. | +| global.imagePullCredentials | object | `{}` | Globally define credentials for pulling images. | +| global.imagePullSecrets | list | `[]` | Secrets / credentials needed for container image registry. | +| httpHeaderInjectorWebhook.enabled | bool | `false` | Enable the webhook for injection http header injection sidecar proxy | +| logsAgent.affinity | object | `{}` | Affinity settings for pod assignment. | +| logsAgent.enabled | bool | `true` | Enable / disable k8s pod log collection | +| logsAgent.image.pullPolicy | string | `"IfNotPresent"` | Default container image pull policy. | +| logsAgent.image.repository | string | `"stackstate/promtail"` | Base container image repository. | +| logsAgent.image.tag | string | `"2.7.1"` | Default container image tag. | +| logsAgent.nodeSelector | object | `{}` | Node labels for pod assignment. | +| logsAgent.priorityClassName | string | `""` | Priority class for logsAgent pods. | +| logsAgent.resources.limits.cpu | string | `"1300m"` | Memory resource limits. | +| logsAgent.resources.limits.memory | string | `"192Mi"` | | +| logsAgent.resources.requests.cpu | string | `"20m"` | Memory resource requests. 
| +| logsAgent.resources.requests.memory | string | `"100Mi"` | | +| logsAgent.serviceaccount.annotations | object | `{}` | Annotations for the service account for the daemonset pods | +| logsAgent.tolerations | list | `[]` | Toleration labels for pod assignment. | +| logsAgent.updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":100},"type":"RollingUpdate"}` | The update strategy for the DaemonSet object. | +| nameOverride | string | `""` | Override the name of the chart. | +| nodeAgent.affinity | object | `{}` | Affinity settings for pod assignment. | +| nodeAgent.apm.enabled | bool | `true` | Enable / disable the nodeAgent APM module. | +| nodeAgent.checksTagCardinality | string | `"orchestrator"` | low, orchestrator or high. Orchestrator level adds pod_name, high adds display_container_name | +| nodeAgent.config | object | `{"override":[]}` | | +| nodeAgent.config.override | list | `[]` | A list of objects containing three keys `name`, `path` and `data`, specifying filenames at specific paths which need to be (potentially) overridden using a mounted configmap | +| nodeAgent.containerRuntime.customSocketPath | string | `""` | If the container socket path does not match the default for CRI-O, Containerd or Docker, supply a custom socket path. | +| nodeAgent.containerRuntime.hostProc | string | `"/proc"` | | +| nodeAgent.containers.agent.env | object | `{}` | Additional environment variables for the agent container | +| nodeAgent.containers.agent.image.pullPolicy | string | `"IfNotPresent"` | Default container image pull policy. | +| nodeAgent.containers.agent.image.repository | string | `"stackstate/stackstate-k8s-agent"` | Base container image repository. | +| nodeAgent.containers.agent.image.tag | string | `"e36d1c88"` | Default container image tag. | +| nodeAgent.containers.agent.livenessProbe.enabled | bool | `true` | Enable use of livenessProbe check. 
| +| nodeAgent.containers.agent.livenessProbe.failureThreshold | int | `3` | `failureThreshold` for the liveness probe. | +| nodeAgent.containers.agent.livenessProbe.initialDelaySeconds | int | `15` | `initialDelaySeconds` for the liveness probe. | +| nodeAgent.containers.agent.livenessProbe.periodSeconds | int | `15` | `periodSeconds` for the liveness probe. | +| nodeAgent.containers.agent.livenessProbe.successThreshold | int | `1` | `successThreshold` for the liveness probe. | +| nodeAgent.containers.agent.livenessProbe.timeoutSeconds | int | `5` | `timeoutSeconds` for the liveness probe. | +| nodeAgent.containers.agent.logLevel | string | `nil` | Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off # If not set, fall back to the value of agent.logLevel. | +| nodeAgent.containers.agent.processAgent.enabled | bool | `false` | Enable / disable the agent process agent module. - deprecated | +| nodeAgent.containers.agent.readinessProbe.enabled | bool | `true` | Enable use of readinessProbe check. | +| nodeAgent.containers.agent.readinessProbe.failureThreshold | int | `3` | `failureThreshold` for the readiness probe. | +| nodeAgent.containers.agent.readinessProbe.initialDelaySeconds | int | `15` | `initialDelaySeconds` for the readiness probe. | +| nodeAgent.containers.agent.readinessProbe.periodSeconds | int | `15` | `periodSeconds` for the readiness probe. | +| nodeAgent.containers.agent.readinessProbe.successThreshold | int | `1` | `successThreshold` for the readiness probe. | +| nodeAgent.containers.agent.readinessProbe.timeoutSeconds | int | `5` | `timeoutSeconds` for the readiness probe. | +| nodeAgent.containers.agent.resources.limits.cpu | string | `"270m"` | Memory resource limits. | +| nodeAgent.containers.agent.resources.limits.memory | string | `"420Mi"` | | +| nodeAgent.containers.agent.resources.requests.cpu | string | `"20m"` | Memory resource requests. 
| +| nodeAgent.containers.agent.resources.requests.memory | string | `"180Mi"` | | +| nodeAgent.containers.processAgent.enabled | bool | `true` | Enable / disable the process agent container. | +| nodeAgent.containers.processAgent.env | object | `{}` | Additional environment variables for the process-agent container | +| nodeAgent.containers.processAgent.image.pullPolicy | string | `"IfNotPresent"` | Process-agent container image pull policy. | +| nodeAgent.containers.processAgent.image.registry | string | `nil` | | +| nodeAgent.containers.processAgent.image.repository | string | `"stackstate/stackstate-k8s-process-agent"` | Process-agent container image repository. | +| nodeAgent.containers.processAgent.image.tag | string | `"c9dbfd73"` | Default process-agent container image tag. | +| nodeAgent.containers.processAgent.logLevel | string | `nil` | Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off # If not set, fall back to the value of agent.logLevel. | +| nodeAgent.containers.processAgent.resources.limits.cpu | string | `"125m"` | Memory resource limits. | +| nodeAgent.containers.processAgent.resources.limits.memory | string | `"400Mi"` | | +| nodeAgent.containers.processAgent.resources.requests.cpu | string | `"25m"` | Memory resource requests. | +| nodeAgent.containers.processAgent.resources.requests.memory | string | `"128Mi"` | | +| nodeAgent.httpTracing.enabled | bool | `true` | | +| nodeAgent.logLevel | string | `"INFO"` | Logging level for agent processes. | +| nodeAgent.networkTracing.enabled | bool | `true` | Enable / disable the nodeAgent network tracing module. | +| nodeAgent.nodeSelector | object | `{}` | Node labels for pod assignment. | +| nodeAgent.priorityClassName | string | `""` | Priority class for nodeAgent pods. | +| nodeAgent.protocolInspection.enabled | bool | `true` | Enable / disable the nodeAgent protocol inspection. 
| +| nodeAgent.scc.enabled | bool | `false` | Enable / disable the installation of the SecurityContextConfiguration needed for installation on OpenShift. | +| nodeAgent.service | object | `{"annotations":{},"loadBalancerSourceRanges":["10.0.0.0/8"],"type":"ClusterIP"}` | The Kubernetes service for the agent | +| nodeAgent.service.annotations | object | `{}` | Annotations for the service | +| nodeAgent.service.loadBalancerSourceRanges | list | `["10.0.0.0/8"]` | The IP4 CIDR allowed to reach LoadBalancer for the service. For LoadBalancer type of service only. | +| nodeAgent.service.type | string | `"ClusterIP"` | Type of Kubernetes service: ClusterIP, LoadBalancer, NodePort | +| nodeAgent.serviceaccount.annotations | object | `{}` | Annotations for the service account for the agent daemonset pods | +| nodeAgent.skipKubeletTLSVerify | bool | `false` | Set to true if you want to skip kubelet tls verification. | +| nodeAgent.skipSslValidation | bool | `false` | Set to true if self signed certificates are used. | +| nodeAgent.tolerations | list | `[]` | Toleration labels for pod assignment. | +| nodeAgent.updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":100},"type":"RollingUpdate"}` | The update strategy for the DaemonSet object. | +| openShiftLogging.installSecret | bool | `false` | Install a secret for logging on openshift | +| processAgent.checkIntervals.connections | int | `30` | Override the default value of the connections check interval in seconds. | +| processAgent.checkIntervals.container | int | `30` | Override the default value of the container check interval in seconds. | +| processAgent.checkIntervals.process | int | `30` | Override the default value of the process check interval in seconds. | +| stackstate.apiKey | string | `nil` | **PROVIDE YOUR API KEY HERE** API key to be used by the StackState agent. | +| stackstate.cluster.authToken | string | `""` | Provide a token to enable secure communication between the agent and the cluster agent. 
| +| stackstate.cluster.name | string | `nil` | **PROVIDE KUBERNETES CLUSTER NAME HERE** Name of the Kubernetes cluster where the agent will be installed. | +| stackstate.url | string | `nil` | **PROVIDE STACKSTATE URL HERE** URL of the StackState installation to receive data from the agent. | +| targetSystem | string | `"linux"` | Target OS for this deployment (possible values: linux) | diff --git a/charts/stackstate/stackstate-k8s-agent/README.md.gotmpl b/charts/stackstate/stackstate-k8s-agent/README.md.gotmpl new file mode 100644 index 000000000..7909e6f0d --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/README.md.gotmpl @@ -0,0 +1,45 @@ +{{ template "chart.header" . }} +{{ template "chart.description" . }} + +Current chart version is `{{ template "chart.version" . }}` + +{{ template "chart.homepageLine" . }} + +{{ template "chart.requirementsSection" . }} + +## Required Values + +In order to successfully install this chart, you **must** provide the following variables: + +* `stackstate.apiKey` +* `stackstate.cluster.name` +* `stackstate.url` + +The parameter `stackstate.cluster.name` is entered when installing the Cluster Agent StackPack. + +Install them on the command line on Helm with the following command: + +```shell +helm install \ +--set-string 'stackstate.apiKey'='' \ +--set-string 'stackstate.cluster.name'='' \ +--set-string 'stackstate.url'='' \ +stackstate/stackstate-k8s-agent +``` + +## Recommended Values + +It is also recommended that you set a value for `stackstate.cluster.authToken`. If it is not provided, a value will be generated for you, but the value will change each time an upgrade is performed. + +The command for **also** installing with a set token would be: + +```shell +helm install \ +--set-string 'stackstate.apiKey'='' \ +--set-string 'stackstate.cluster.name'='' \ +--set-string 'stackstate.cluster.authToken'='' \ +--set-string 'stackstate.url'='' \ +stackstate/stackstate-k8s-agent +``` + +{{ template "chart.valuesSection" . 
}} diff --git a/charts/stackstate/stackstate-k8s-agent/Releasing.md b/charts/stackstate/stackstate-k8s-agent/Releasing.md new file mode 100644 index 000000000..bab6c2b94 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/Releasing.md @@ -0,0 +1,15 @@ +To make a new release of this helm chart, follow the following steps: + + +- Create a branch from master +- Set the latest tags for the docker images, based on the dev settings (while we do not promote to prod, the moment we promote to prod we should take those tags) from https://gitlab.com/stackvista/devops/agent-promoter/-/blob/master/config.yml. Set the value to the folowing keys: + * stackstate-k8s-cluster-agent: + * [clusterAgent.image.tag] + * stackstate-k8s-agent: + * [nodeAgent.containers.agent.image.tag] + * [checksAgent.image.tag] + * stackstate-k8s-process-agent: + * [nodeAgent.containers.processAgent.image.tag] +- Bump the version of the chart +- Merge the mr and hit the public release button on the ci pipeline +- Manually smoke-test (deploy) the newly released stackstate/stackstate-k8s-agent chart to make sure it runs diff --git a/charts/stackstate/stackstate-k8s-agent/app-readme.md b/charts/stackstate/stackstate-k8s-agent/app-readme.md new file mode 100644 index 000000000..8025fe1d3 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/app-readme.md @@ -0,0 +1,5 @@ +## Introduction + +StackState is a modern Application Troubleshooting and Observability solution designed for the rapid evolving engineering landscape. With specific enhancements for Kubernetes environments it empowers engineers, allowing them to remediate application issues independently in production. + +The StackState Agent auto-discovers your entire environment in minutes, assimilating topology, logs, metrics, and events and sends this of to the StackState server. By using StackState you're able to tracke all activity in your environment in real-time and over time. 
StackState provides instant understanding of the business impact of an issue, offering end-to-end chain observability and ensuring that you can quickly correlate any product or environmental changes to the overall health of your cloud-native implementation. diff --git a/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/.helmignore b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/.helmignore new file mode 100644 index 000000000..69790771c --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/.helmignore @@ -0,0 +1,25 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ +linter_values.yaml +ci/ +installation/ diff --git a/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/Chart.yaml b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/Chart.yaml new file mode 100644 index 000000000..c1f1de800 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/Chart.yaml @@ -0,0 +1,15 @@ +apiVersion: v2 +appVersion: 0.0.1 +description: 'Helm chart for deploying the http-header-injector sidecar, which automatically + injects x-request-id into http traffic going through the cluster for pods which + have the annotation `http-header-injector.stackstate.io/inject: enabled` is set. 
' +home: https://github.com/StackVista/http-header-injector +icon: https://www.stackstate.com/wp-content/uploads/2019/02/152x152-favicon.png +keywords: +- monitoring +- stackstate +maintainers: +- email: ops@stackstate.com + name: Stackstate Lupulus Team +name: http-header-injector +version: 0.0.6 diff --git a/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/README.md b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/README.md new file mode 100644 index 000000000..3f83e01b8 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/README.md @@ -0,0 +1,54 @@ +# http-header-injector + +![Version: 0.0.6](https://img.shields.io/badge/Version-0.0.6-informational?style=flat-square) ![AppVersion: 0.0.1](https://img.shields.io/badge/AppVersion-0.0.1-informational?style=flat-square) + +Helm chart for deploying the http-header-injector sidecar, which automatically injects x-request-id into http traffic +going through the cluster for pods which have the annotation `http-header-injector.stackstate.io/inject: enabled` is set. + +**Homepage:** + +## Maintainers + +| Name | Email | Url | +| ---- | ------ | --- | +| Stackstate Lupulus Team | | | + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| certificatePrehook | object | `{"image":{"pullPolicy":"IfNotPresent","registry":null,"repository":"stackstate/container-tools","tag":"1.1.8"}}` | Helm prehook to setup/remove a certificate for the sidecarInjector mutationwebhook | +| certificatePrehook.image.pullPolicy | string | `"IfNotPresent"` | Policy when pulling an image | +| certificatePrehook.image.registry | string | `nil` | Registry for the docker image. | +| certificatePrehook.image.tag | string | `"1.1.8"` | The tag for the docker image | +| debug | bool | `false` | Enable debugging. 
This will leave leave artifacts around like the prehook jobs for further inspection | +| enabled | bool | `true` | Enable/disable the mutationwebhook | +| global.imagePullCredentials | object | `{}` | Globally define credentials for pulling images. | +| global.imagePullSecrets | list | `[]` | Globally add image pull secrets that are used. | +| global.imageRegistry | string | `nil` | Globally override the image registry that is used. Can be overridden by specific containers. Defaults to quay.io | +| images.pullSecretName | string | `nil` | | +| proxy | object | `{"image":{"pullPolicy":"IfNotPresent","registry":null,"repository":"stackstate/http-header-injector-proxy","tag":"sha-5ff79451"},"resources":{"limits":{"memory":"40Mi"},"requests":{"memory":"25Mi"}}}` | Proxy being injected into pods for rewriting http headers | +| proxy.image.pullPolicy | string | `"IfNotPresent"` | Policy when pulling an image | +| proxy.image.registry | string | `nil` | Registry for the docker image. | +| proxy.image.tag | string | `"sha-5ff79451"` | The tag for the docker image | +| proxy.resources.limits.memory | string | `"40Mi"` | Memory resource limits. | +| proxy.resources.requests.memory | string | `"25Mi"` | Memory resource requests. | +| proxyInit | object | `{"image":{"pullPolicy":"IfNotPresent","registry":null,"repository":"stackstate/http-header-injector-proxy-init","tag":"sha-5ff79451"}}` | InitContainer within pod which redirects traffic to the proxy container. 
| +| proxyInit.image.pullPolicy | string | `"IfNotPresent"` | Policy when pulling an image | +| proxyInit.image.registry | string | `nil` | Registry for the docker image | +| proxyInit.image.tag | string | `"sha-5ff79451"` | The tag for the docker image | +| sidecarInjector | object | `{"image":{"pullPolicy":"IfNotPresent","registry":null,"repository":"stackstate/generic-sidecar-injector","tag":"sha-9c852245"}}` | Service for injecting the proxy sidecar into pods | +| sidecarInjector.image.pullPolicy | string | `"IfNotPresent"` | Policy when pulling an image | +| sidecarInjector.image.registry | string | `nil` | Registry for the docker image. | +| sidecarInjector.image.tag | string | `"sha-9c852245"` | The tag for the docker image | +| webhook | object | `{"failurePolicy":"Ignore","tls":{"certManager":{"issuer":"","issuerKind":"ClusterIssuer","issuerNamespace":""},"mode":"generated","provided":{"caBundle":"","crt":"","key":""},"secret":{"name":""}}}` | MutationWebhook that will be installed to inject a sidecar into pods | +| webhook.failurePolicy | string | `"Ignore"` | How should the webhook fail? Best is to use Ignore, because there is a brief moment at initialization when the hook is there but the service is not. Also, putting this to fail can cause the control plane to be unresponsive. | +| webhook.tls.certManager.issuer | string | `""` | The issuer that is used for the webhook. Only used if you set webhook.tls.mode to "cert-manager". | +| webhook.tls.certManager.issuerKind | string | `"ClusterIssuer"` | The issuer kind that is used for the webhook, valid values are "Issuer" or "ClusterIssuer". Only used if you set webhook.tls.mode to "cert-manager". | +| webhook.tls.certManager.issuerNamespace | string | `""` | The namespace the cert-manager issuer is located in. If left empty defaults to the release's namespace that is used for the webhook. Only used if you set webhook.tls.mode to "cert-manager". 
| +| webhook.tls.mode | string | `"generated"` | The mode for the webhook. Can be "provided", "generated", "secret" or "cert-manager". If you want to use cert-manager, you need to install it first. NOTE: If you choose "generated", additional privileges are required to create the certificate and webhook at runtime. | +| webhook.tls.provided.caBundle | string | `""` | The caBundle that is used for the webhook. This is the certificate that is used to sign the webhook. Only used if you set webhook.tls.mode to "provided". | +| webhook.tls.provided.crt | string | `""` | The certificate that is used for the webhook. Only used if you set webhook.tls.mode to "provided". | +| webhook.tls.provided.key | string | `""` | The key that is used for the webhook. Only used if you set webhook.tls.mode to "provided". | +| webhook.tls.secret.name | string | `""` | The name of the secret containing the pre-provisioned certificate data that is used for the webhook. Only used if you set webhook.tls.mode to "secret". | + diff --git a/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/Readme.md.gotpl b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/Readme.md.gotpl new file mode 100644 index 000000000..225032aa2 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/Readme.md.gotpl @@ -0,0 +1,26 @@ +{{ template "chart.header" . }} +{{ template "chart.description" . }} + +Current chart version is `{{ template "chart.version" . }}` + +{{ template "chart.homepageLine" . }} + +{{ template "chart.requirementsSection" . }} + +## Required Values + +No values have to be included to install this chart. After installing this chart, it becomes possible to annotate pods with +the `http-header-injector.stackstate.io/inject: enabled` annotation to make sure the sidecar provided by this chart is +activated on a pod. + +## Recommended Values + +{{ template "chart.valuesSection" . 
-}} + +## Install + +Install from the command line on Helm with the following command: + +```shell +helm install stackstate/http-header-injector +``` diff --git a/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/_defines.tpl b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/_defines.tpl new file mode 100644 index 000000000..f1b8b8872 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/_defines.tpl @@ -0,0 +1,82 @@ +{{- define "http-header-injector.app.name" -}} +{{ .Release.Name }}-http-header-injector +{{- end -}} + +{{- define "http-header-injector.webhook-service.name" -}} +{{ .Release.Name }}-http-header-injector +{{- end -}} + +{{- define "http-header-injector.webhook-service.fqname" -}} +{{ .Release.Name }}-http-header-injector.{{ .Release.Namespace }}.svc +{{- end -}} + +{{- define "http-header-injector.cert-secret.name" -}} +{{- if eq .Values.webhook.tls.mode "secret" -}} +{{ .Values.webhook.tls.secret.name }} +{{- else -}} +{{ .Release.Name }}-http-injector-cert +{{- end -}} +{{- end -}} + +{{- define "http-header-injector.cert-clusterrole.name" -}} +{{ .Release.Name }}-http-injector-cert-cluster-role +{{- end -}} + +{{- define "http-header-injector.cert-serviceaccount.name" -}} +{{ .Release.Name }}-http-injector-cert-sa +{{- end -}} + +{{- define "http-header-injector.cert-config.name" -}} +{{ .Release.Name }}-cert-config +{{- end -}} + +{{- define "http-header-injector.mutatingwebhookconfiguration.name" -}} +{{ .Release.Name }}-http-header-injector-webhook.stackstate.io +{{- end -}} + +{{- define "http-header-injector.webhook-config.name" -}} +{{ .Release.Name }}-http-header-injector-config +{{- end -}} + +{{- define "http-header-injector.mutating-webhook.name" -}} +{{ .Release.Name }}-http-header-injector-webhook +{{- end -}} + +{{- define "http-header-injector.pull-secret.name" -}} +{{ include "http-header-injector.app.name" . 
}}-pull-secret +{{- end -}} + +{{/* If the issuer is located in a different namespace, it is possible to set that, else default to the release namespace */}} +{{- define "cert-manager.certificate.namespace" -}} +{{ .Values.webhook.tls.certManager.issuerNamespace | default .Release.Namespace }} +{{- end -}} + +{{- define "http-header-injector.image.registry.global" -}} + {{- if .Values.global }} + {{- .Values.global.imageRegistry | default "quay.io" -}} + {{- else -}} + quay.io + {{- end -}} +{{- end -}} + +{{- define "http-header-injector.image.registry" -}} + {{- if ((.ContainerConfig).image).registry -}} + {{- tpl .ContainerConfig.image.registry . -}} + {{- else -}} + {{- include "http-header-injector.image.registry.global" . }} + {{- end -}} +{{- end -}} + +{{- define "http-header-injector.image.pullSecrets" -}} + {{- $pullSecrets := list }} + {{- $pullSecrets = append $pullSecrets (include "http-header-injector.pull-secret.name" .) }} + {{- range .Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- if (not (empty $pullSecrets)) -}} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . }} + {{- end }} + {{- end -}} +{{- end -}} diff --git a/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/cert-hook-clusterrolbinding.yaml b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/cert-hook-clusterrolbinding.yaml new file mode 100644 index 000000000..fb804f729 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/cert-hook-clusterrolbinding.yaml @@ -0,0 +1,22 @@ +{{- if eq .Values.webhook.tls.mode "generated" }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: "{{ include "http-header-injector.cert-serviceaccount.name" . 
}}" + labels: + app.kubernetes.io/component: http-header-injector-cert-hook + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "http-header-injector.app.name" . }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-delete,post-upgrade + "helm.sh/hook-weight": "-3" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: "{{ include "http-header-injector.cert-clusterrole.name" . }}" +subjects: + - kind: ServiceAccount + name: "{{ include "http-header-injector.cert-serviceaccount.name" . }}" + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/cert-hook-clusterrole.yaml b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/cert-hook-clusterrole.yaml new file mode 100644 index 000000000..595ae5c1b --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/cert-hook-clusterrole.yaml @@ -0,0 +1,24 @@ +{{- if eq .Values.webhook.tls.mode "generated" }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: "{{ include "http-header-injector.cert-clusterrole.name" . }}" + labels: + app.kubernetes.io/component: http-header-injector-cert-hook + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "http-header-injector.app.name" . 
}} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-delete,post-upgrade + "helm.sh/hook-weight": "-4" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +rules: + - apiGroups: [ "admissionregistration.k8s.io" ] + resources: [ "mutatingwebhookconfigurations" ] + verbs: [ "get", "create", "patch","update","delete" ] + - apiGroups: [ "" ] + resources: [ "secrets" ] + verbs: [ "create", "get", "patch","update","delete" ] + - apiGroups: [ "apps" ] + resources: [ "deployments" ] + verbs: [ "get" ] +{{- end }} diff --git a/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/cert-hook-config.yaml b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/cert-hook-config.yaml new file mode 100644 index 000000000..b0c5f22fd --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/cert-hook-config.yaml @@ -0,0 +1,152 @@ +{{- if eq .Values.webhook.tls.mode "generated" }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: "{{ include "http-header-injector.cert-config.name" . }}" + labels: + app.kubernetes.io/component: http-header-injector-cert-hook + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "http-header-injector.app.name" . }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-delete,post-upgrade + "helm.sh/hook-weight": "-3" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +data: + generate-cert.sh: | + #!/bin/bash + + # We are going for a self-signed certificate here. We would like to use k8s CertificateSigningRequest, however, + # currently there are no out of the box signers that can sign a 'server auth' certificate, which is required for mutation webhooks. 
+ set -ex + + SCRIPTDIR="${BASH_SOURCE%/*}" + + DIR=`mktemp -d` + + cd "$DIR" + + {{ if .Values.enabled }} + echo "Chart enabled, creating secret and webhook" + + openssl genrsa -out ca.key 2048 + + openssl req -x509 -new -nodes -key ca.key -subj "/CN={{ include "http-header-injector.webhook-service.fqname" . }}" -days 10000 -out ca.crt + + openssl genrsa -out tls.key 2048 + + openssl req -new -key tls.key -out tls.csr -config "$SCRIPTDIR/csr.conf" + + openssl x509 -req -in tls.csr -CA ca.crt -CAkey ca.key \ + -CAcreateserial -out tls.crt -days 10000 \ + -extensions v3_ext -extfile "$SCRIPTDIR/csr.conf" -sha256 + + # Create or update the secret + echo "Applying secret" + kubectl create secret tls "{{ include "http-header-injector.cert-secret.name" . }}" \ + -n "{{ .Release.Namespace }}" \ + --cert=./tls.crt \ + --key=./tls.key \ + --dry-run=client \ + -o yaml | kubectl apply -f - + + echo "Applying mutationwebhook" + caBundle=`base64 -w 0 ca.crt` + cat "$SCRIPTDIR/mutatingwebhookconfiguration.yaml" | sed "s/\\\$CA_BUNDLE/$caBundle/g" | kubectl apply -f - + {{ else }} + echo "Chart disabled, not creating secret and webhook" + {{ end }} + delete-cert.sh: | + #!/bin/bash + + set -x + + DIR="${BASH_SOURCE%/*}" + if [[ ! -d "$DIR" ]]; then DIR="$PWD"; fi + if [[ "$DIR" = "." ]]; then DIR="$PWD"; fi + + cd "$DIR" + + # Using detection of deployment hee to also make this work in post-delete. + if kubectl get deployments "{{ include "http-header-injector.app.name" . }}" -n "{{ .Release.Namespace }}"; then + echo "Chart enabled, not removing secret and mutationwebhook" + exit 0 + else + echo "Chart disabled, removing secret and mutationwebhook" + fi + + # Create or update the secret + echo "Deleting secret" + kubectl delete secret "{{ include "http-header-injector.cert-secret.name" . }}" -n "{{ .Release.Namespace }}" + + echo "Applying mutationwebhook" + kubectl delete MutatingWebhookConfiguration "{{ include "http-header-injector.mutating-webhook.name" . 
}}" -n "{{ .Release.Namespace }}" + + exit 0 + + csr.conf: | + [ req ] + default_bits = 2048 + prompt = no + default_md = sha256 + req_extensions = req_ext + distinguished_name = dn + + [ dn ] + C = NL + ST = Utrecht + L = Hilversum + O = StackState + OU = Dev + CN = {{ include "http-header-injector.webhook-service.fqname" . }} + + [ req_ext ] + subjectAltName = @alt_names + + [ alt_names ] + DNS.1 = {{ include "http-header-injector.webhook-service.fqname" . }} + + [ v3_ext ] + authorityKeyIdentifier=keyid,issuer:always + basicConstraints=CA:FALSE + keyUsage=keyEncipherment,dataEncipherment + extendedKeyUsage=serverAuth + subjectAltName=@alt_names + + mutatingwebhookconfiguration.yaml: | + apiVersion: admissionregistration.k8s.io/v1 + kind: MutatingWebhookConfiguration + metadata: + name: "{{ include "http-header-injector.mutating-webhook.name" . }}" + namespace: "{{ .Release.Namespace }}" + webhooks: + - clientConfig: + caBundle: "$CA_BUNDLE" + service: + name: "{{ include "http-header-injector.webhook-service.name" . }}" + path: /mutate + namespace: {{ .Release.Namespace }} + port: 8443 + # Putting failure on ignore, not doing so can crash the entire control plane if something goes wrong with the service. + failurePolicy: "{{ .Values.webhook.failurePolicy }}" + name: "{{ include "http-header-injector.mutatingwebhookconfiguration.name" . 
}}" + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: NotIn + values: + - kube-system + - cert-manager + - {{ .Release.Namespace }} + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + resources: + - pods + sideEffects: None + admissionReviewVersions: + - v1 +{{- end }} diff --git a/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/cert-hook-job-delete.yaml b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/cert-hook-job-delete.yaml new file mode 100644 index 000000000..027d69b37 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/cert-hook-job-delete.yaml @@ -0,0 +1,42 @@ +{{- if eq .Values.webhook.tls.mode "generated" }} +{{- $containerConfig := dict "ContainerConfig" .Values.certificatePrehook -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Release.Name }}-header-injector-cert-delete + labels: + app.kubernetes.io/component: http-header-injector-cert-hook-delete + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "http-header-injector.app.name" . }} + annotations: + "helm.sh/hook": post-delete,post-upgrade + "helm.sh/hook-weight": "-2" + "helm.sh/hook-delete-policy": before-hook-creation{{- if not .Values.debug -}},hook-succeeded{{- end }} +spec: + template: + metadata: + labels: + app.kubernetes.io/component: http-header-injector-delete + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "http-header-injector.app.name" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/cert-hook-config.yaml") . | sha256sum }} + spec: + serviceAccountName: "{{ include "http-header-injector.cert-serviceaccount.name" . }}" + {{- include "http-header-injector.image.pullSecrets" . | nindent 6 }} + volumes: + - name: "{{ include "http-header-injector.cert-config.name" . 
}}" + configMap: + name: "{{ include "http-header-injector.cert-config.name" . }}" + defaultMode: 0777 + containers: + - name: webhook-cert-delete + image: "{{ include "http-header-injector.image.registry" (merge $containerConfig .) }}/{{ .Values.certificatePrehook.image.repository }}:{{ .Values.certificatePrehook.image.tag }}" + imagePullPolicy: {{ .Values.certificatePrehook.image.pullPolicy }} + volumeMounts: + - name: "{{ include "http-header-injector.cert-config.name" . }}" + mountPath: /scripts + command: [ "/scripts/delete-cert.sh" ] + restartPolicy: Never + backoffLimit: 0 +{{- end }} diff --git a/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/cert-hook-job-setup.yaml b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/cert-hook-job-setup.yaml new file mode 100644 index 000000000..b8e310442 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/cert-hook-job-setup.yaml @@ -0,0 +1,43 @@ +{{- if eq .Values.webhook.tls.mode "generated" }} +{{- $containerConfig := dict "ContainerConfig" .Values.certificatePrehook -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Release.Name }}-header-injector-cert-setup + labels: + app.kubernetes.io/component: http-header-injector-cert-hook-setup + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "http-header-injector.app.name" . }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-weight": "-2" + "helm.sh/hook-delete-policy": before-hook-creation{{- if not .Values.debug -}},hook-succeeded{{- end }} +spec: + template: + metadata: + labels: + app.kubernetes.io/component: http-header-injector-setup + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "http-header-injector.app.name" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/cert-hook-config.yaml") . 
| sha256sum }} + spec: + serviceAccountName: "{{ include "http-header-injector.cert-serviceaccount.name" . }}" + {{- include "http-header-injector.image.pullSecrets" . | nindent 6 }} + volumes: + - name: "{{ include "http-header-injector.cert-config.name" . }}" + configMap: + name: "{{ include "http-header-injector.cert-config.name" . }}" + defaultMode: 0777 + containers: + - name: webhook-cert-setup + image: "{{ include "http-header-injector.image.registry" (merge $containerConfig .) }}/{{ .Values.certificatePrehook.image.repository }}:{{ .Values.certificatePrehook.image.tag }}" + imagePullPolicy: {{ .Values.certificatePrehook.image.pullPolicy }} + volumeMounts: + - name: "{{ include "http-header-injector.cert-config.name" . }}" + mountPath: /scripts + readOnly: true + command: ["/scripts/generate-cert.sh"] + restartPolicy: Never + backoffLimit: 0 +{{- end }} diff --git a/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/cert-hook-serviceaccount.yaml b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/cert-hook-serviceaccount.yaml new file mode 100644 index 000000000..4d6931b05 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/cert-hook-serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if eq .Values.webhook.tls.mode "generated" }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "{{ include "http-header-injector.cert-serviceaccount.name" . }}" + namespace: {{ .Release.Namespace }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-delete,post-upgrade + "helm.sh/hook-weight": "-4" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app.kubernetes.io/component: http-header-injector-cert-hook + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "http-header-injector.app.name" . }} + app: "{{ include "http-header-injector.app.name" . 
}}" +{{- end }} diff --git a/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/pull-secret.yaml b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/pull-secret.yaml new file mode 100644 index 000000000..5dc48d931 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/pull-secret.yaml @@ -0,0 +1,29 @@ +{{- $defaultRegistry := .Values.global.imageRegistry }} +{{- $top := . }} +{{- $registryAuthMap := dict }} + +{{- range $registry, $credentials := .Values.global.imagePullCredentials }} + {{- $registryAuthDocument := dict -}} + {{- $_ := set $registryAuthDocument "username" $credentials.username }} + {{- $_ := set $registryAuthDocument "password" $credentials.password }} + {{- $authMessage := printf "%s:%s" $registryAuthDocument.username $registryAuthDocument.password | b64enc }} + {{- $_ := set $registryAuthDocument "auth" $authMessage }} + {{- if eq $registry "default" }} + {{- $registryAuthMap := set $registryAuthMap (include "http-header-injector.image.registry.global" $top) $registryAuthDocument }} + {{ else }} + {{- $registryAuthMap := set $registryAuthMap $registry $registryAuthDocument }} + {{- end }} +{{- end }} +{{- $dockerAuthsDocuments := dict "auths" $registryAuthMap }} + +apiVersion: v1 +kind: Secret +metadata: + labels: + app.kubernetes.io/component: http-header-injector + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "http-header-injector.app.name" . }} + name: {{ include "http-header-injector.pull-secret.name" . 
}} +data: + .dockerconfigjson: {{ $dockerAuthsDocuments | toJson | b64enc | quote }} +type: kubernetes.io/dockerconfigjson \ No newline at end of file diff --git a/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/webhook-cert-secret.yaml b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/webhook-cert-secret.yaml new file mode 100644 index 000000000..ba7a216f2 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/webhook-cert-secret.yaml @@ -0,0 +1,15 @@ +{{- if eq .Values.webhook.tls.mode "provided" }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "http-header-injector.cert-secret.name" . }} + namespace: {{ .Release.Namespace }} + labels: + app.kubernetes.io/component: http-header-injector + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "http-header-injector.app.name" . }} +type: kubernetes.io/tls +data: + tls.crt: {{ .Values.webhook.tls.provided.crt | b64enc }} + tls.key: {{ .Values.webhook.tls.provided.key | b64enc }} +{{- end }} diff --git a/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/webhook-certificate.yaml b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/webhook-certificate.yaml new file mode 100644 index 000000000..752132d44 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/webhook-certificate.yaml @@ -0,0 +1,20 @@ +{{- if eq .Values.webhook.tls.mode "cert-manager" }} +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ include "http-header-injector.webhook-service.name" . }} + namespace: {{ include "cert-manager.certificate.namespace" . }} + labels: + app.kubernetes.io/component: http-header-injector + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "http-header-injector.app.name" . 
}} +spec: + secretName: {{ include "http-header-injector.cert-secret.name" . }} + issuerRef: + name: {{ .Values.webhook.tls.certManager.issuer }} + kind: {{ .Values.webhook.tls.certManager.issuerKind }} + dnsNames: + - "{{ include "http-header-injector.webhook-service.name" . }}" + - "{{ include "http-header-injector.webhook-service.name" . }}.{{ .Release.Namespace }}" + - "{{ include "http-header-injector.webhook-service.name" . }}.{{ .Release.Namespace }}.svc" +{{- end }} diff --git a/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/webhook-config.yaml b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/webhook-config.yaml new file mode 100644 index 000000000..f611a52e3 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/webhook-config.yaml @@ -0,0 +1,125 @@ +{{- if .Values.enabled -}} +{{- $proxyContainerConfig := dict "ContainerConfig" .Values.proxy -}} +{{- $proxyInitContainerConfig := dict "ContainerConfig" .Values.proxyInit -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/component: http-header-injector + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "http-header-injector.app.name" . }} + name: {{ .Release.Name }}-http-header-injector-config +data: + sidecarconfig.yaml: | + initContainers: + - name: http-header-proxy-init + image: "{{ include "http-header-injector.image.registry" (merge $proxyInitContainerConfig .) 
}}/{{ .Values.proxyInit.image.repository }}:{{ .Values.proxyInit.image.tag }}" + imagePullPolicy: {{ .Values.proxyInit.image.pullPolicy }} + command: ["/init-iptables.sh"] + env: + - name: CHART_VERSION + value: "{{ .Chart.Version }}" + - name: PROXY_PORT + value: {% if index .Annotations "config.http-header-injector.stackstate.io/proxy-port" %}"{% index .Annotations "config.http-header-injector.stackstate.io/proxy-port" %}"{% else %}"7060"{% end %} + - name: PROXY_UID + value: {% if index .Annotations "config.http-header-injector.stackstate.io/proxy-uid" %}"{% index .Annotations "config.http-header-injector.stackstate.io/proxy-uid" %}"{% else %}"2103"{% end %} + - name: POD_HOST_NETWORK + value: {% .Spec.HostNetwork %} + {% if eq (index .Annotations "linkerd.io/inject") "enabled" %} + - name: LINKERD + value: true + # Reference: https://linkerd.io/2.13/reference/proxy-configuration/ + - name: LINKERD_PROXY_UID + value: {% if index .Annotations "config.linkerd.io/proxy-uid" %}"{% index .Annotations "config.linkerd.io/proxy-uid" %}"{% else %}"2102"{% end %} + # Due to https://github.com/linkerd/linkerd2/issues/10981 this is now not realy possible, still bringing in the code for future reference + - name: LINKERD_ADMIN_PORT + value: {% if index .Annotations "config.linkerd.io/admin-port" %}"{% index .Annotations "config.linkerd.io/admin-port" %}"{% else %}"4191"{% end %} + {% end %} + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + seccompProfile: + type: RuntimeDefault + volumeMounts: + # This is required for iptables to be able to run + - mountPath: /run + name: http-header-proxy-init-xtables-lock + + containers: + - name: http-header-proxy + image: "{{ include "http-header-injector.image.registry" (merge $proxyContainerConfig .) 
}}/{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }}" + imagePullPolicy: {{ .Values.proxy.image.pullPolicy }} + env: + - name: CHART_VERSION + value: "{{ .Chart.Version }}" + - name: PORT + value: {% if index .Annotations "config.http-header-injector.stackstate.io/proxy-port" %}"{% index .Annotations "config.http-header-injector.stackstate.io/proxy-port" %}"{% else %}"7060"{% end %} + - name: DEBUG + value: {% if index .Annotations "config.http-header-injector.stackstate.io/debug" %}"{% index .Annotations "config.http-header-injector.stackstate.io/debug" %}"{% else %}"disabled"{% end %} + securityContext: + runAsUser: {% if index .Annotations "config.http-header-injector.stackstate.io/proxy-uid" %}{% index .Annotations "config.http-header-injector.stackstate.io/proxy-uid" %}{% else %}2103{% end %} + seccompProfile: + type: RuntimeDefault + {{- with .Values.proxy.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + - name: http-header-inject-debug + image: "{{ include "http-header-injector.image.registry" (merge $proxyContainerConfig .) 
}}/{{ .Values.proxyInit.image.repository }}:{{ .Values.proxyInit.image.tag }}" + imagePullPolicy: {{ .Values.proxyInit.image.pullPolicy }} + command: ["/bin/sh", "-c", "while echo \"Running\"; do sleep 1; done"] + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_ADMIN + - NET_RAW + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: false + runAsUser: 0 + seccompProfile: + type: RuntimeDefault + volumeMounts: + # This is required for iptables to be able to run + - mountPath: /run + name: http-header-proxy-init-xtables-lock + + volumes: + - emptyDir: {} + name: http-header-proxy-init-xtables-lock + + mutationconfig.yaml: | + mutationConfigs: + - name: "http-header-injector" + annotationNamespace: "http-header-injector.stackstate.io" + annotationTrigger: "inject" + annotationConfig: + volumeMounts: [] + initContainersBeforePodInitContainers: [ "http-header-proxy-init" ] + initContainers: [ "http-header-proxy-init" ] + containers: [ "http-header-proxy" ] + volumes: [ "http-header-proxy-init-xtables-lock" ] + volumeMounts: [ ] + # Namespaces are ignored by the mutatingwebhook + ignoreNamespaces: [ ] + - name: "http-header-injector-debug" + annotationNamespace: "http-header-injector-debug.stackstate.io" + annotationTrigger: "inject" + annotationConfig: + volumeMounts: [] + initContainersBeforePodInitContainers: [ ] + initContainers: [ ] + containers: [ "http-header-inject-debug" ] + volumes: [ "http-header-proxy-init-xtables-lock" ] + volumeMounts: [ ] + # Namespaces are ignored by the mutatingwebhook + ignoreNamespaces: [ ] + {{- end -}} \ No newline at end of file diff --git a/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/webhook-deployment.yaml b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/webhook-deployment.yaml new file mode 100644 index 000000000..e885d0e46 --- /dev/null +++ 
b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/webhook-deployment.yaml @@ -0,0 +1,56 @@ +{{- if .Values.enabled -}} +{{- $containerConfig := dict "ContainerConfig" .Values.sidecarInjector -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: http-header-injector + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "http-header-injector.app.name" . }} + app: "{{ include "http-header-injector.app.name" . }}" + name: "{{ include "http-header-injector.app.name" . }}" +spec: + replicas: 1 + selector: + matchLabels: + app: "{{ include "http-header-injector.app.name" . }}" + template: + metadata: + labels: + app.kubernetes.io/component: http-header-injector + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "http-header-injector.app.name" . }} + app: "{{ include "http-header-injector.app.name" . }}" + annotations: + checksum/config: {{ include (print $.Template.BasePath "/webhook-config.yaml") . | sha256sum }} + # This is here to make sure the generic injector gets restarted and picks up a new secret that may have been generated upon upgrade. + revision: "{{ .Release.Revision }}" + name: "{{ include "http-header-injector.app.name" . }}" + spec: + {{- include "http-header-injector.image.pullSecrets" . | nindent 6 }} + volumes: + - name: "{{ include "http-header-injector.webhook-config.name" . }}" + configMap: + name: "{{ include "http-header-injector.webhook-config.name" . }}" + - name: "{{ include "http-header-injector.cert-secret.name" . }}" + secret: + secretName: "{{ include "http-header-injector.cert-secret.name" . }}" + containers: + - image: "{{ include "http-header-injector.image.registry" (merge $containerConfig .) 
}}/{{ .Values.sidecarInjector.image.repository }}:{{ .Values.sidecarInjector.image.tag }}" + imagePullPolicy: {{ .Values.sidecarInjector.image.pullPolicy }} + name: http-header-injector + volumeMounts: + - name: "{{ include "http-header-injector.webhook-config.name" . }}" + mountPath: /etc/webhook/config + readOnly: true + - name: "{{ include "http-header-injector.cert-secret.name" . }}" + mountPath: /etc/webhook/certs + readOnly: true + command: [ "/sidecarinjector" ] + args: + - --port=8443 + - --sidecar-config-file=/etc/webhook/config/sidecarconfig.yaml + - --mutation-config-file=/etc/webhook/config/mutationconfig.yaml + - --cert-file-path=/etc/webhook/certs/tls.crt + - --key-file-path=/etc/webhook/certs/tls.key +{{- end -}} \ No newline at end of file diff --git a/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/webhook-mutatingwebhookconfiguration.yaml b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/webhook-mutatingwebhookconfiguration.yaml new file mode 100644 index 000000000..32d58afde --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/webhook-mutatingwebhookconfiguration.yaml @@ -0,0 +1,52 @@ +{{- if not (eq .Values.webhook.tls.mode "generated") }} +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: "{{ include "http-header-injector.mutating-webhook.name" . }}" + namespace: "{{ .Release.Namespace }}" + labels: + app.kubernetes.io/component: http-header-injector + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "http-header-injector.app.name" . }} + annotations: + {{- if eq .Values.webhook.tls.mode "cert-manager" }} + cert-manager.io/inject-ca-from: {{ include "cert-manager.certificate.namespace" . }}/{{ include "http-header-injector.webhook-service.name" . 
}} + {{- else if eq .Values.webhook.tls.mode "secret" }} + cert-manager.io/inject-ca-from-secret: {{ .Release.Namespace }}/{{ .Values.webhook.tls.secret.name | required "'webhook.tls.secret.name' is required when webhook.tls.mode is 'secret'" }} + {{- end }} +webhooks: + - clientConfig: + {{- if eq .Values.webhook.tls.mode "provided" }} + caBundle: "{{ .Values.webhook.tls.provided.caBundle | b64enc }}" + {{- else if or (eq .Values.webhook.tls.mode "cert-manager") (eq .Values.webhook.tls.mode "secret") }} + caBundle: "" + {{- end }} + service: + name: "{{ include "http-header-injector.webhook-service.name" . }}" + path: /mutate + namespace: {{ .Release.Namespace }} + port: 8443 + # Putting failure on ignore, not doing so can crash the entire control plane if something goes wrong with the service. + failurePolicy: "{{ .Values.webhook.failurePolicy }}" + name: "{{ include "http-header-injector.mutatingwebhookconfiguration.name" . }}" + namespaceSelector: + matchExpressions: + - key: kubernetes.io/metadata.name + operator: NotIn + values: + - kube-system + - cert-manager + - {{ .Release.Namespace }} + rules: + - apiGroups: + - "" + apiVersions: + - v1 + operations: + - CREATE + resources: + - pods + sideEffects: None + admissionReviewVersions: + - v1 +{{- end }} diff --git a/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/webhook-service.yaml b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/webhook-service.yaml new file mode 100644 index 000000000..6936a5d23 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/templates/webhook-service.yaml @@ -0,0 +1,17 @@ +{{- if .Values.enabled -}} +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: http-header-injector + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "http-header-injector.app.name" . }} + name: "{{ include "http-header-injector.webhook-service.name" . 
}}" +spec: + ports: + - port: 8443 + protocol: TCP + targetPort: 8443 + selector: + app: "{{ include "http-header-injector.app.name" . }}" +{{- end -}} diff --git a/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/values.yaml b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/values.yaml new file mode 100644 index 000000000..236a8bb6a --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/charts/http-header-injector/values.yaml @@ -0,0 +1,98 @@ +# enabled -- Enable/disable the mutationwebhook +enabled: true + +# debug -- Enable debugging. This will leave leave artifacts around like the prehook jobs for further inspection +debug: false + +global: + # global.imageRegistry -- Globally override the image registry that is used. Can be overridden by specific containers. Defaults to quay.io + imageRegistry: null + # global.imagePullSecrets -- Globally add image pull secrets that are used. + imagePullSecrets: [] + # global.imagePullCredentials -- Globally define credentials for pulling images. + imagePullCredentials: {} + +images: + pullSecretName: + +# proxy -- Proxy being injected into pods for rewriting http headers +proxy: + image: + # proxy.image.registry -- Registry for the docker image. + registry: + # proxy.image.repository - Repository for the docker image + repository: "stackstate/http-header-injector-proxy" + # proxy.image.pullPolicy -- Policy when pulling an image + pullPolicy: IfNotPresent + # proxy.image.tag -- The tag for the docker image + tag: sha-5ff79451 + + # proxy.resource -- Resources for the proxy container + resources: + requests: + # proxy.resources.requests.memory -- Memory resource requests. + memory: "25Mi" + limits: + # proxy.resources.limits.memory -- Memory resource limits. + memory: "40Mi" + +# proxyInit -- InitContainer within pod which redirects traffic to the proxy container. 
+proxyInit: + image: + # proxyInit.image.registry -- Registry for the docker image + registry: + # proxyInit.image.repository - Repository for the docker image + repository: "stackstate/http-header-injector-proxy-init" + # proxyInit.image.pullPolicy -- Policy when pulling an image + pullPolicy: IfNotPresent + # proxyInit.image.tag -- The tag for the docker image + tag: sha-5ff79451 + +# sidecarInjector -- Service for injecting the proxy sidecar into pods +sidecarInjector: + image: + # sidecarInjector.image.registry -- Registry for the docker image. + registry: + # sidecarInjector.image.repository - Repository for the docker image + repository: "stackstate/generic-sidecar-injector" + # sidecarInjector.image.pullPolicy -- Policy when pulling an image + pullPolicy: IfNotPresent + # sidecarInjector.image.tag -- The tag for the docker image + tag: sha-9c852245 + +# certificatePrehook -- Helm prehook to setup/remove a certificate for the sidecarInjector mutationwebhook +certificatePrehook: + image: + # certificatePrehook.image.registry -- Registry for the docker image. + registry: + # certificatePrehook.image.repository - Repository for the docker image. + repository: stackstate/container-tools + # certificatePrehook.image.pullPolicy -- Policy when pulling an image + pullPolicy: IfNotPresent + # certificatePrehook.image.tag -- The tag for the docker image + tag: 1.1.8 + +# webhook -- MutationWebhook that will be installed to inject a sidecar into pods +webhook: + # webhook.failurePolicy -- How should the webhook fail? Best is to use Ignore, because there is a brief moment at initialization when the hook s there but the service not. Also, putting this to fail can cause the control plane be unresponsive. + failurePolicy: Ignore + tls: + # webhook.tls.mode -- The mode for the webhook. Can be "provided", "generated", "secret" or "cert-manager". If you want to use cert-manager, you need to install it first. 
NOTE: If you choose "generated", additional privileges are required to create the certificate and webhook at runtime. + mode: "generated" + provided: + # webhook.tls.provided.caBundle -- The caBundle that is used for the webhook. This is the certificate that is used to sign the webhook. Only used if you set webhook.tls.mode to "provided". + caBundle: "" + # webhook.tls.provided.crt -- The certificate that is used for the webhook. Only used if you set webhook.tls.mode to "provided". + crt: "" + # webhook.tls.provided.key -- The key that is used for the webhook. Only used if you set webhook.tls.mode to "provided". + key: "" + certManager: + # webhook.tls.certManager.issuer -- The issuer that is used for the webhook. Only used if you set webhook.tls.mode to "cert-manager". + issuer: "" + # webhook.tls.certManager.issuerKind -- The issuer kind that is used for the webhook, valid values are "Issuer" or "ClusterIssuer". Only used if you set webhook.tls.mode to "cert-manager". + issuerKind: "ClusterIssuer" + # webhook.tls.certManager.issuerNamespace -- The namespace the cert-manager issuer is located in. If left empty defaults to the release's namespace that is used for the webhook. Only used if you set webhook.tls.mode to "cert-manager". + issuerNamespace: "" + secret: + # webhook.tls.secret.name -- The name of the secret containing the pre-provisioned certificate data that is used for the webhook. Only used if you set webhook.tls.mode to "secret". + name: "" diff --git a/charts/stackstate/stackstate-k8s-agent/questions.yml b/charts/stackstate/stackstate-k8s-agent/questions.yml new file mode 100644 index 000000000..5d6e6a011 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/questions.yml @@ -0,0 +1,184 @@ +questions: + - variable: stackstate.apiKey + label: "StackState API Key" + type: string + description: "The API key for StackState." 
+ required: true + group: General + - variable: stackstate.url + label: "StackState URL" + type: string + description: "The URL where StackState is running." + required: true + group: General + - variable: stackstate.cluster.name + label: "StackState Cluster Name" + type: string + description: "The StackState Cluster Name given when installing the instance of the Kubernetes StackPack in StackState. This is used to identify the cluster in StackState." + required: true + group: General + - variable: all.registry.override + label: "Override Default Image Registry" + type: boolean + description: "Whether or not to override the default image registry." + default: false + group: "General" + show_subquestions_if: true + subquestions: + - variable: all.image.registry + label: "Docker Image Registry" + type: string + description: "The registry to pull the StackState Agent images from." + default: "quay.io" + - variable: global.imagePullCredentials.username + label: "Docker Image Pull Username" + type: string + description: "The username to use when pulling the StackState Agent images." + - variable: global.imagePullCredentials.password + label: "Docker Image Pull Password" + type: secret + description: "The password to use when pulling the StackState Agent images." + - variable: nodeAgent.containers.agent.resources.override + label: "Override Node Agent Resource Allocation" + type: boolean + description: "Whether or not to override the default resources." + default: "false" + group: "Node Agent" + show_subquestions_if: true + subquestions: + - variable: nodeAgent.containers.agent.resources.requests.cpu + label: "CPU Requests" + type: string + description: "The requested CPU for the Node Agent." + default: "20m" + - variable: nodeAgent.containers.agent.resources.requests.memory + label: "Memory Requests" + type: string + description: "The requested memory for the Node Agent." 
+ default: "180Mi" + - variable: nodeAgent.containers.agent.resources.limits.cpu + label: "CPU Limit" + type: string + description: "The CPU limit for the Node Agent." + default: "270m" + - variable: nodeAgent.containers.agent.resources.limits.memory + label: "Memory Limit" + type: string + description: "The memory limit for the Node Agent." + default: "420Mi" + - variable: nodeAgent.containers.processAgent.enabled + label: "Enable Process Agent" + type: boolean + description: "Whether or not to enable the Process Agent." + default: "true" + group: "Process Agent" + - variable: nodeAgent.skipKubeletTLSVerify + label: "Skip Kubelet TLS Verify" + type: boolean + description: "Whether or not to skip TLS verification when connecting to the kubelet API." + default: "true" + group: "Process Agent" + - variable: nodeAgent.containers.processAgent.resources.override + label: "Override Process Agent Resource Allocation" + type: boolean + description: "Whether or not to override the default resources." + default: "false" + group: "Process Agent" + show_subquestions_if: true + subquestions: + - variable: nodeAgent.containers.processAgent.resources.requests.cpu + label: "CPU Requests" + type: string + description: "The requested CPU for the Process Agent." + default: "25m" + - variable: nodeAgent.containers.processAgent.resources.requests.memory + label: "Memory Requests" + type: string + description: "The requested memory for the Process Agent." + default: "128Mi" + - variable: nodeAgent.containers.processAgent.resources.limits.cpu + label: "CPU Limit" + type: string + description: "The CPU limit for the Process Agent." + default: "125m" + - variable: nodeAgent.containers.processAgent.resources.limits.memory + label: "Memory Limit" + type: string + description: "The memory limit for the Process Agent." + default: "400Mi" + - variable: clusterAgent.enabled + label: "Enable Cluster Agent" + type: boolean + description: "Whether or not to enable the Cluster Agent." 
+ default: "true" + group: "Cluster Agent" + - variable: clusterAgent.collection.kubernetesResources.secrets + label: "Collect Secret Resources" + type: boolean + description: | + Whether or not to collect Kubernetes Secrets. + NOTE: StackState will not send the actual data of the secrets, only the metadata and a secure hash of the data. + default: "true" + group: "Cluster Agent" + - variable: clusterAgent.resources.override + label: "Override Cluster Agent Resource Allocation" + type: boolean + description: "Whether or not to override the default resources." + default: "false" + group: "Cluster Agent" + show_subquestions_if: true + subquestions: + - variable: clusterAgent.resources.requests.cpu + label: "CPU Requests" + type: string + description: "The requested CPU for the Cluster Agent." + default: "70m" + - variable: clusterAgent.resources.requests.memory + label: "Memory Requests" + type: string + description: "The requested memory for the Cluster Agent." + default: "512Mi" + - variable: clusterAgent.resources.limits.cpu + label: "CPU Limit" + type: string + description: "The CPU limit for the Cluster Agent." + default: "400m" + - variable: clusterAgent.resources.limits.memory + label: "Memory Limit" + type: string + description: "The memory limit for the Cluster Agent." + default: "800Mi" + - variable: logsAgent.enabled + label: "Enable Logs Agent" + type: boolean + description: "Whether or not to enable the Logs Agent." + default: "true" + group: "Logs Agent" + - variable: logsAgent.resources.override + label: "Override Logs Agent Resource Allocation" + type: boolean + description: "Whether or not to override the default resources." + default: "false" + group: "Logs Agent" + show_subquestions_if: true + subquestions: + - variable: logsAgent.resources.requests.cpu + label: "CPU Requests" + type: string + description: "The requested CPU for the Logs Agent." 
+ default: "20m" + - variable: logsAgent.resources.requests.memory + label: "Memory Requests" + type: string + description: "The requested memory for the Logs Agent." + default: "100Mi" + - variable: logsAgent.resources.limits.cpu + label: "CPU Limit" + type: string + description: "The CPU limit for the Logs Agent." + default: "1300m" + - variable: logsAgent.resources.limits.memory + label: "Memory Limit" + type: string + description: "The memory limit for the Logs Agent." + default: "192Mi" diff --git a/charts/stackstate/stackstate-k8s-agent/templates/_cluster-agent-kube-state-metrics.yaml b/charts/stackstate/stackstate-k8s-agent/templates/_cluster-agent-kube-state-metrics.yaml new file mode 100644 index 000000000..f99fbf618 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/_cluster-agent-kube-state-metrics.yaml @@ -0,0 +1,62 @@ +{{- define "cluster-agent-kube-state-metrics" -}} +{{- $kubeRes := .Values.clusterAgent.collection.kubernetesResources }} +{{- if .Values.clusterAgent.collection.kubeStateMetrics.clusterCheck }} +cluster_check: true +{{- end }} +init_config: +instances: + - collectors: + - nodes + - pods + - services + {{- if $kubeRes.persistentvolumeclaims }} + - persistentvolumeclaims + {{- end }} + {{- if $kubeRes.persistentvolumes }} + - persistentvolumes + {{- end }} + {{- if $kubeRes.namespaces }} + - namespaces + {{- end }} + {{- if $kubeRes.endpoints }} + - endpoints + {{- end }} + {{- if $kubeRes.daemonsets }} + - daemonsets + {{- end }} + {{- if $kubeRes.deployments }} + - deployments + {{- end }} + {{- if $kubeRes.replicasets }} + - replicasets + {{- end }} + {{- if $kubeRes.statefulsets }} + - statefulsets + {{- end }} + {{- if $kubeRes.cronjobs }} + - cronjobs + {{- end }} + {{- if $kubeRes.jobs }} + - jobs + {{- end }} + {{- if $kubeRes.ingresses }} + - ingresses + {{- end }} + {{- if $kubeRes.secrets }} + - secrets + {{- end }} + - resourcequotas + - replicationcontrollers + - limitranges + - horizontalpodautoscalers + - 
poddisruptionbudgets + - storageclasses + - volumeattachments + {{- if .Values.clusterAgent.collection.kubeStateMetrics.clusterCheck }} + skip_leader_election: true + {{- end }} + labels_as_tags: + {{ .Values.clusterAgent.collection.kubeStateMetrics.labelsAsTags | toYaml | indent 8 }} + annotations_as_tags: + {{ .Values.clusterAgent.collection.kubeStateMetrics.annotationsAsTags | toYaml | indent 8 }} +{{- end -}} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/_container-agent.yaml b/charts/stackstate/stackstate-k8s-agent/templates/_container-agent.yaml new file mode 100644 index 000000000..033ca11ec --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/_container-agent.yaml @@ -0,0 +1,192 @@ +{{- define "container-agent" -}} +- name: node-agent +{{- if .Values.all.hardening.enabled}} + lifecycle: + preStop: + exec: + command: [ "/bin/sh", "-c", "echo 'Giving slim.ai monitor time to submit data...'; sleep 120" ] +{{- end }} + image: "{{ include "stackstate-k8s-agent.imageRegistry" . }}/{{ .Values.nodeAgent.containers.agent.image.repository }}:{{ .Values.nodeAgent.containers.agent.image.tag }}" + imagePullPolicy: "{{ .Values.nodeAgent.containers.agent.image.pullPolicy }}" + env: + - name: STS_API_KEY + valueFrom: + secretKeyRef: + name: {{ include "stackstate-k8s-agent.fullname" . }} + key: sts-api-key + - name: STS_KUBERNETES_KUBELET_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: KUBERNETES_HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: STS_HOSTNAME + value: "$(KUBERNETES_HOSTNAME)-{{ .Values.stackstate.cluster.name}}" + - name: AGENT_VERSION + value: {{ .Values.nodeAgent.containers.agent.image.tag | quote }} + - name: HOST_PROC + value: "/host/proc" + - name: HOST_SYS + value: "/host/sys" + - name: KUBERNETES + value: "true" + - name: STS_APM_ENABLED + value: {{ .Values.nodeAgent.apm.enabled | quote }} + - name: STS_APM_URL + value: {{ include "stackstate-k8s-agent.stackstate.url" . 
}} + - name: STS_CLUSTER_AGENT_ENABLED + value: {{ .Values.clusterAgent.enabled | quote }} + {{- if .Values.clusterAgent.enabled }} + - name: STS_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: {{ .Release.Name }}-cluster-agent + - name: STS_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: {{ include "stackstate-k8s-agent.fullname" . }} + key: sts-cluster-auth-token + {{- end }} + - name: STS_CLUSTER_NAME + value: {{ .Values.stackstate.cluster.name | quote }} + - name: STS_SKIP_VALIDATE_CLUSTERNAME + value: "true" + - name: STS_CHECKS_TAG_CARDINALITY + value: {{ .Values.nodeAgent.checksTagCardinality | quote }} + {{- if .Values.checksAgent.enabled }} + - name: STS_EXTRA_CONFIG_PROVIDERS + value: "endpointschecks" + {{- end }} + - name: STS_HEALTH_PORT + value: "5555" + - name: STS_LEADER_ELECTION + value: "false" + - name: LOG_LEVEL + value: {{ .Values.nodeAgent.containers.agent.logLevel | default .Values.nodeAgent.logLevel | quote }} + - name: STS_LOG_LEVEL + value: {{ .Values.nodeAgent.containers.agent.logLevel | default .Values.nodeAgent.logLevel | quote }} + - name: STS_NETWORK_TRACING_ENABLED + value: {{ .Values.nodeAgent.networkTracing.enabled | quote }} + - name: STS_PROTOCOL_INSPECTION_ENABLED + value: {{ .Values.nodeAgent.protocolInspection.enabled | quote }} + - name: STS_PROCESS_AGENT_ENABLED + value: {{ .Values.nodeAgent.containers.agent.processAgent.enabled | quote }} + - name: STS_CONTAINER_CHECK_INTERVAL + value: {{ .Values.processAgent.checkIntervals.container | quote }} + - name: STS_CONNECTION_CHECK_INTERVAL + value: {{ .Values.processAgent.checkIntervals.connections | quote }} + - name: STS_PROCESS_CHECK_INTERVAL + value: {{ .Values.processAgent.checkIntervals.process | quote }} + - name: STS_PROCESS_AGENT_URL + value: {{ include "stackstate-k8s-agent.stackstate.url" . 
}} + - name: STS_SKIP_SSL_VALIDATION + value: {{ .Values.nodeAgent.skipSslValidation | quote }} + - name: STS_SKIP_KUBELET_TLS_VERIFY + value: {{ .Values.nodeAgent.skipKubeletTLSVerify | quote }} + - name: STS_STS_URL + value: {{ include "stackstate-k8s-agent.stackstate.url" . }} + {{- if .Values.nodeAgent.containerRuntime.customSocketPath }} + - name: STS_CRI_SOCKET_PATH + value: {{ .Values.nodeAgent.containerRuntime.customSocketPath }} + {{- end }} + {{- range $key, $value := .Values.nodeAgent.containers.agent.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- range $key, $value := .Values.global.extraEnv.open }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- range $key, $value := .Values.global.extraEnv.secret }} + - name: {{ $key }} + valueFrom: + secretKeyRef: + name: {{ include "stackstate-k8s-agent.fullname" . }} + key: {{ $key }} + {{- end }} + {{- if .Values.nodeAgent.containers.agent.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: /health + port: healthport + failureThreshold: {{ .Values.nodeAgent.containers.agent.livenessProbe.failureThreshold }} + initialDelaySeconds: {{ .Values.nodeAgent.containers.agent.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.nodeAgent.containers.agent.livenessProbe.periodSeconds }} + successThreshold: {{ .Values.nodeAgent.containers.agent.livenessProbe.successThreshold }} + timeoutSeconds: {{ .Values.nodeAgent.containers.agent.livenessProbe.timeoutSeconds }} + {{- end }} + {{- if .Values.nodeAgent.containers.agent.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: /health + port: healthport + failureThreshold: {{ .Values.nodeAgent.containers.agent.readinessProbe.failureThreshold }} + initialDelaySeconds: {{ .Values.nodeAgent.containers.agent.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.nodeAgent.containers.agent.readinessProbe.periodSeconds }} + successThreshold: {{ 
.Values.nodeAgent.containers.agent.readinessProbe.successThreshold }} + timeoutSeconds: {{ .Values.nodeAgent.containers.agent.readinessProbe.timeoutSeconds }} + {{- end }} + ports: + - containerPort: 8126 + name: traceport + protocol: TCP + - containerPort: 5555 + name: healthport + protocol: TCP + {{- with .Values.nodeAgent.containers.agent.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.nodeAgent.containerRuntime.customSocketPath }} + - name: customcrisocket + mountPath: {{ .Values.nodeAgent.containerRuntime.customSocketPath }} + readOnly: true + {{- end }} + - name: crisocket + mountPath: /var/run/crio/crio.sock + readOnly: true + - name: containerdsocket + mountPath: /var/run/containerd/containerd.sock + readOnly: true + - name: kubelet + mountPath: /var/lib/kubelet + readOnly: true + - name: nfs + mountPath: /var/lib/nfs + readOnly: true + - name: dockersocket + mountPath: /var/run/docker.sock + readOnly: true + - name: dockernetns + mountPath: /run/docker/netns + readOnly: true + - name: dockeroverlay2 + mountPath: /var/lib/docker/overlay2 + readOnly: true + - name: procdir + mountPath: /host/proc + readOnly: true + - name: cgroups + mountPath: /host/sys/fs/cgroup + readOnly: true + {{- if .Values.nodeAgent.config.override }} + {{- range .Values.nodeAgent.config.override }} + - name: config-override-volume + mountPath: {{ .path }}/{{ .name }} + subPath: {{ .path | replace "/" "_"}}_{{ .name }} + readOnly: true + {{- end }} + {{- end }} +{{- if .Values.all.hardening.enabled}} + securityContext: + privileged: true + runAsUser: 0 # root + capabilities: + add: [ "ALL" ] + readOnlyRootFilesystem: false +{{- else }} + securityContext: + privileged: false +{{- end }} +{{- end -}} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/_container-process-agent.yaml b/charts/stackstate/stackstate-k8s-agent/templates/_container-process-agent.yaml new file mode 100644 index 000000000..98f4f96b9 --- /dev/null 
+++ b/charts/stackstate/stackstate-k8s-agent/templates/_container-process-agent.yaml @@ -0,0 +1,148 @@ +{{- define "container-process-agent" -}} +- name: process-agent +{{ if .Values.nodeAgent.containers.processAgent.image.registry }} + image: "{{ .Values.nodeAgent.containers.processAgent.image.registry }}/{{ .Values.nodeAgent.containers.processAgent.image.repository }}:{{ .Values.nodeAgent.containers.processAgent.image.tag }}" +{{ else }} + image: "{{ include "stackstate-k8s-agent.imageRegistry" . }}/{{ .Values.nodeAgent.containers.processAgent.image.repository }}:{{ .Values.nodeAgent.containers.processAgent.image.tag }}" +{{- end }} + imagePullPolicy: "{{ .Values.nodeAgent.containers.processAgent.image.pullPolicy }}" + ports: + - containerPort: 6063 + env: + - name: STS_API_KEY + valueFrom: + secretKeyRef: + name: {{ include "stackstate-k8s-agent.fullname" . }} + key: sts-api-key + - name: STS_KUBERNETES_KUBELET_HOST + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: KUBERNETES_HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: STS_HOSTNAME + value: "$(KUBERNETES_HOSTNAME)-{{ .Values.stackstate.cluster.name}}" + - name: AGENT_VERSION + value: {{ .Values.nodeAgent.containers.processAgent.image.tag | quote }} + - name: STS_LOG_TO_CONSOLE + value: "true" + - name: HOST_PROC + value: "/host/proc" + - name: HOST_SYS + value: "/host/sys" + - name: KUBERNETES + value: "true" + - name: STS_CLUSTER_AGENT_ENABLED + value: {{ .Values.clusterAgent.enabled | quote }} + {{- if .Values.clusterAgent.enabled }} + - name: STS_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: {{ .Release.Name }}-cluster-agent + - name: STS_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: {{ include "stackstate-k8s-agent.fullname" . 
}} + key: sts-cluster-auth-token + {{- end }} + - name: STS_CLUSTER_NAME + value: {{ .Values.stackstate.cluster.name | quote }} + - name: STS_SKIP_VALIDATE_CLUSTERNAME + value: "true" + - name: LOG_LEVEL + value: {{ .Values.nodeAgent.containers.processAgent.logLevel | default .Values.nodeAgent.logLevel | quote }} + - name: STS_LOG_LEVEL + value: {{ .Values.nodeAgent.containers.processAgent.logLevel | default .Values.nodeAgent.logLevel | quote }} + - name: STS_NETWORK_TRACING_ENABLED + value: {{ .Values.nodeAgent.networkTracing.enabled | quote }} + - name: STS_PROTOCOL_INSPECTION_ENABLED + value: {{ .Values.nodeAgent.protocolInspection.enabled | quote }} + - name: STS_PROCESS_AGENT_ENABLED + value: {{ .Values.nodeAgent.containers.processAgent.enabled | quote }} + - name: STS_CONTAINER_CHECK_INTERVAL + value: {{ .Values.processAgent.checkIntervals.container | quote }} + - name: STS_CONNECTION_CHECK_INTERVAL + value: {{ .Values.processAgent.checkIntervals.connections | quote }} + - name: STS_PROCESS_CHECK_INTERVAL + value: {{ .Values.processAgent.checkIntervals.process | quote }} + - name: STS_PROCESS_AGENT_URL + value: {{ include "stackstate-k8s-agent.stackstate.url" . }} + - name: STS_SKIP_SSL_VALIDATION + value: {{ .Values.nodeAgent.skipSslValidation | quote }} + - name: STS_SKIP_KUBELET_TLS_VERIFY + value: {{ .Values.nodeAgent.skipKubeletTLSVerify | quote }} + - name: STS_STS_URL + value: {{ include "stackstate-k8s-agent.stackstate.url" . 
}} + - name: STS_HTTP_TRACING_ENABLED + value: {{ .Values.nodeAgent.httpTracing.enabled | quote }} + {{- if .Values.nodeAgent.containerRuntime.customSocketPath }} + - name: STS_CRI_SOCKET_PATH + value: {{ .Values.nodeAgent.containerRuntime.customSocketPath }} + {{- end }} + {{- range $key, $value := .Values.nodeAgent.containers.processAgent.env }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- range $key, $value := .Values.global.extraEnv.open }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- range $key, $value := .Values.global.extraEnv.secret }} + - name: {{ $key }} + valueFrom: + secretKeyRef: + name: {{ include "stackstate-k8s-agent.fullname" . }} + key: {{ $key }} + {{- end }} + {{- with .Values.nodeAgent.containers.processAgent.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.nodeAgent.containerRuntime.customSocketPath }} + - name: customcrisocket + mountPath: {{ .Values.nodeAgent.containerRuntime.customSocketPath }} + readOnly: true + {{- end }} + - name: crisocket + mountPath: /var/run/crio/crio.sock + readOnly: true + - name: containerdsocket + mountPath: /var/run/containerd/containerd.sock + readOnly: true + - name: sys-kernel-debug + mountPath: /sys/kernel/debug + # Having sys-kernel-debug as read only breaks specific monitors from receiving metrics + # readOnly: true + - name: dockersocket + mountPath: /var/run/docker.sock + readOnly: true + - name: procdir + mountPath: /host/proc + readOnly: true + - name: passwd + mountPath: /etc/passwd + readOnly: true + - name: cgroups + mountPath: /host/sys/fs/cgroup + readOnly: true + {{- if .Values.nodeAgent.config.override }} + {{- range .Values.nodeAgent.config.override }} + - name: config-override-volume + mountPath: {{ .path }}/{{ .name }} + subPath: {{ .path | replace "/" "_"}}_{{ .name }} + readOnly: true + {{- end }} + {{- end }} +{{- if .Values.all.hardening.enabled}} + securityContext: + privileged: true + 
runAsUser: 0 # root + capabilities: + add: [ "ALL" ] + readOnlyRootFilesystem: false +{{- else }} + securityContext: + privileged: true +{{- end }} +{{- end -}} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/_helpers.tpl b/charts/stackstate/stackstate-k8s-agent/templates/_helpers.tpl new file mode 100644 index 000000000..09a27fd6e --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/_helpers.tpl @@ -0,0 +1,175 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "stackstate-k8s-agent.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "stackstate-k8s-agent.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "stackstate-k8s-agent.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "stackstate-k8s-agent.labels" -}} +app.kubernetes.io/name: {{ include "stackstate-k8s-agent.name" . }} +helm.sh/chart: {{ include "stackstate-k8s-agent.chart" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Cluster agent checksum annotations +*/}} +{{- define "stackstate-k8s-agent.checksum-configs" }} +checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} +{{- end }} + +{{/* +StackState URL function +*/}} +{{- define "stackstate-k8s-agent.stackstate.url" -}} +{{ tpl .Values.stackstate.url . | quote }} +{{- end }} + +{{- define "stackstate-k8s-agent.configmap.override.checksum" -}} +{{- if .Values.clusterAgent.config.override }} +checksum/override-configmap: {{ include (print $.Template.BasePath "/cluster-agent-configmap.yaml") . | sha256sum }} +{{- end }} +{{- end }} + +{{- define "stackstate-k8s-agent.nodeAgent.configmap.override.checksum" -}} +{{- if .Values.nodeAgent.config.override }} +checksum/override-configmap: {{ include (print $.Template.BasePath "/node-agent-configmap.yaml") . | sha256sum }} +{{- end }} +{{- end }} + +{{- define "stackstate-k8s-agent.logsAgent.configmap.override.checksum" -}} +checksum/override-configmap: {{ include (print $.Template.BasePath "/logs-agent-configmap.yaml") . | sha256sum }} +{{- end }} + +{{- define "stackstate-k8s-agent.checksAgent.configmap.override.checksum" -}} +{{- if .Values.checksAgent.config.override }} +checksum/override-configmap: {{ include (print $.Template.BasePath "/checks-agent-configmap.yaml") . | sha256sum }} +{{- end }} +{{- end }} + + +{{/* +Return the image registry +*/}} +{{- define "stackstate-k8s-agent.imageRegistry" -}} + {{- if .Values.global }} + {{- .Values.global.imageRegistry | default .Values.all.image.registry -}} + {{- else -}} + {{- .Values.all.image.registry -}} + {{- end -}} +{{- end -}} + +{{/* +Renders a value that contains a template. 
+Usage: +{{ include "stackstate-k8s-agent.tplvalue.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "stackstate-k8s-agent.tplvalue.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{- define "stackstate-k8s-agent.pull-secret.name" -}} +{{ include "stackstate-k8s-agent.fullname" . }}-pull-secret +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "stackstate-k8s-agent.image.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "stackstate-k8s-agent.image.pullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{/* Is plain array of strings, compatible with all bitnami charts */}} + {{- $pullSecrets = append $pullSecrets (include "stackstate-k8s-agent.tplvalue.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + {{- range $context.Values.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "stackstate-k8s-agent.tplvalue.render" (dict "value" .name "context" $context)) -}} + {{- end -}} + {{- range .images -}} + {{- if .pullSecretName -}} + {{- $pullSecrets = append $pullSecrets (include "stackstate-k8s-agent.tplvalue.render" (dict "value" .pullSecretName "context" $context)) -}} + {{- end -}} + {{- end -}} + {{- $pullSecrets = append $pullSecrets (include "stackstate-k8s-agent.pull-secret.name" $context) -}} + {{- if (not (empty $pullSecrets)) -}} +imagePullSecrets: + {{- range $pullSecrets | uniq }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Check whether the kubernetes-state-metrics configuration is overridden. If so, return 'true' else return nothing (which is false). 
+{{ include "stackstate-k8s-agent.kube-state-metrics.overridden" $ }} +*/}} +{{- define "stackstate-k8s-agent.kube-state-metrics.overridden" -}} +{{- if .Values.clusterAgent.config.override }} + {{- range $i, $val := .Values.clusterAgent.config.override }} + {{- if and (eq $val.name "conf.yaml") (eq $val.path "/etc/stackstate-agent/conf.d/kubernetes_state.d") }} +true + {{- end }} + {{- end }} +{{- end }} +{{- end -}} + +{{- define "stackstate-k8s-agent.nodeAgent.kube-state-metrics.overridden" -}} +{{- if .Values.nodeAgent.config.override }} + {{- range $i, $val := .Values.nodeAgent.config.override }} + {{- if and (eq $val.name "auto_conf.yaml") (eq $val.path "/etc/stackstate-agent/conf.d/kubernetes_state.d") }} +true + {{- end }} + {{- end }} +{{- end }} +{{- end -}} + +{{/* +Return the appropriate os label +*/}} +{{- define "label.os" -}} +{{- if semverCompare "^1.14-0" .Capabilities.KubeVersion.GitVersion -}} +kubernetes.io/os +{{- else -}} +beta.kubernetes.io/os +{{- end -}} +{{- end -}} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/checks-agent-clusterrolebinding.yaml b/charts/stackstate/stackstate-k8s-agent/templates/checks-agent-clusterrolebinding.yaml new file mode 100644 index 000000000..9db8b0bc3 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/checks-agent-clusterrolebinding.yaml @@ -0,0 +1,18 @@ +{{- if .Values.checksAgent.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Release.Name }}-checks-agent + labels: +{{ include "stackstate-k8s-agent.labels" . 
| indent 4 }} + app.kubernetes.io/component: checks-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Release.Name }}-node-agent +subjects: +- apiGroup: "" + kind: ServiceAccount + name: {{ .Release.Name }}-checks-agent + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/checks-agent-configmap.yaml b/charts/stackstate/stackstate-k8s-agent/templates/checks-agent-configmap.yaml new file mode 100644 index 000000000..faeefa1fc --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/checks-agent-configmap.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.checksAgent.enabled .Values.checksAgent.config.override }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-checks-agent + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} + app.kubernetes.io/component: checks-agent +data: +{{- range .Values.checksAgent.config.override }} + {{ .path | replace "/" "_"}}_{{ .name }}: | +{{ .data | indent 4 -}} +{{- end -}} +{{- end -}} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/checks-agent-deployment.yaml b/charts/stackstate/stackstate-k8s-agent/templates/checks-agent-deployment.yaml new file mode 100644 index 000000000..376db4ddf --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/checks-agent-deployment.yaml @@ -0,0 +1,181 @@ +{{- if .Values.checksAgent.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-checks-agent + namespace: {{ .Release.Namespace }} + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} + app.kubernetes.io/component: checks-agent +spec: + selector: + matchLabels: + app.kubernetes.io/component: checks-agent + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "stackstate-k8s-agent.name" . }} + replicas: {{ .Values.checksAgent.replicas }} +{{- with .Values.checksAgent.strategy }} + strategy: + {{- toYaml . 
| nindent 4 }} +{{- end }} + template: + metadata: + annotations: + {{- include "stackstate-k8s-agent.checksum-configs" . | nindent 8 }} + {{- include "stackstate-k8s-agent.nodeAgent.configmap.override.checksum" . | nindent 8 }} + labels: + app.kubernetes.io/component: checks-agent + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "stackstate-k8s-agent.name" . }} + spec: + {{- include "stackstate-k8s-agent.image.pullSecrets" (dict "images" (list .Values.checksAgent.image .Values.all.image) "context" $) | nindent 6 }} + {{- if .Values.all.hardening.enabled}} + terminationGracePeriodSeconds: 240 + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ include "stackstate-k8s-agent.imageRegistry" . }}/{{ .Values.checksAgent.image.repository }}:{{ .Values.checksAgent.image.tag }}" + imagePullPolicy: "{{ .Values.checksAgent.image.pullPolicy }}" + {{- if .Values.all.hardening.enabled}} + lifecycle: + preStop: + exec: + command: [ "/bin/sh", "-c", "echo 'Giving slim.ai monitor time to submit data...'; sleep 120" ] + {{- end }} + env: + - name: STS_API_KEY + valueFrom: + secretKeyRef: + name: {{ include "stackstate-k8s-agent.fullname" . }} + key: sts-api-key + - name: KUBERNETES_HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: STS_HOSTNAME + value: "$(KUBERNETES_HOSTNAME)-{{ .Values.stackstate.cluster.name}}" + - name: AGENT_VERSION + value: {{ .Values.checksAgent.image.tag | quote }} + - name: LOG_LEVEL + value: {{ .Values.checksAgent.logLevel | quote }} + - name: STS_APM_ENABLED + value: "false" + - name: STS_CLUSTER_AGENT_ENABLED + value: {{ .Values.clusterAgent.enabled | quote }} + {{- if .Values.clusterAgent.enabled }} + - name: STS_CLUSTER_AGENT_KUBERNETES_SERVICE_NAME + value: {{ .Release.Name }}-cluster-agent + - name: STS_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: {{ include "stackstate-k8s-agent.fullname" . 
}} + key: sts-cluster-auth-token + {{- end }} + - name: STS_CLUSTER_NAME + value: {{ .Values.stackstate.cluster.name | quote }} + - name: STS_SKIP_VALIDATE_CLUSTERNAME + value: "true" + - name: STS_CHECKS_TAG_CARDINALITY + value: {{ .Values.checksAgent.checksTagCardinality | quote }} + - name: STS_EXTRA_CONFIG_PROVIDERS + value: "clusterchecks" + - name: STS_HEALTH_PORT + value: "5555" + - name: STS_LEADER_ELECTION + value: "false" + - name: STS_LOG_LEVEL + value: {{ .Values.checksAgent.logLevel | quote }} + - name: STS_NETWORK_TRACING_ENABLED + value: "false" + - name: STS_PROCESS_AGENT_ENABLED + value: "false" + - name: STS_SKIP_SSL_VALIDATION + value: {{ .Values.checksAgent.skipSslValidation | quote }} + - name: STS_STS_URL + value: {{ include "stackstate-k8s-agent.stackstate.url" . }} + {{- range $key, $value := .Values.global.extraEnv.open }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- range $key, $value := .Values.global.extraEnv.secret }} + - name: {{ $key }} + valueFrom: + secretKeyRef: + name: {{ include "stackstate-k8s-agent.fullname" . 
}} + key: {{ $key }} + {{- end }} + livenessProbe: + httpGet: + path: /health + port: healthport + failureThreshold: {{ .Values.checksAgent.livenessProbe.failureThreshold }} + initialDelaySeconds: {{ .Values.checksAgent.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.checksAgent.livenessProbe.periodSeconds }} + successThreshold: {{ .Values.checksAgent.livenessProbe.successThreshold }} + timeoutSeconds: {{ .Values.checksAgent.livenessProbe.timeoutSeconds }} + readinessProbe: + httpGet: + path: /health + port: healthport + failureThreshold: {{ .Values.checksAgent.readinessProbe.failureThreshold }} + initialDelaySeconds: {{ .Values.checksAgent.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.checksAgent.readinessProbe.periodSeconds }} + successThreshold: {{ .Values.checksAgent.readinessProbe.successThreshold }} + timeoutSeconds: {{ .Values.checksAgent.readinessProbe.timeoutSeconds }} + ports: + - containerPort: 5555 + name: healthport + protocol: TCP + {{- if .Values.all.hardening.enabled}} + securityContext: + privileged: true + runAsUser: 0 # root + capabilities: + add: [ "ALL" ] + readOnlyRootFilesystem: false + {{- else }} + securityContext: + privileged: false + {{- end }} + {{- with .Values.checksAgent.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + - name: confd-empty-volume + mountPath: /etc/stackstate-agent/conf.d + readOnly: true + {{- if .Values.checksAgent.config.override }} + {{- range .Values.checksAgent.config.override }} + - name: config-override-volume + mountPath: {{ .path }}/{{ .name }} + subPath: {{ .path | replace "/" "_"}}_{{ .name }} + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.checksAgent.priorityClassName }} + priorityClassName: {{ .Values.checksAgent.priorityClassName }} + {{- end }} + serviceAccountName: {{ .Release.Name }}-checks-agent + nodeSelector: + {{ template "label.os" . 
}}: {{ .Values.targetSystem }} + {{- with .Values.checksAgent.nodeSelector }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.checksAgent.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.checksAgent.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: confd-empty-volume + emptyDir: {} + {{- if .Values.checksAgent.config.override }} + - name: config-override-volume + configMap: + name: {{ .Release.Name }}-checks-agent + {{- end }} +{{- end -}} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/checks-agent-poddisruptionbudget.yaml b/charts/stackstate/stackstate-k8s-agent/templates/checks-agent-poddisruptionbudget.yaml new file mode 100644 index 000000000..7a9f1d8f9 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/checks-agent-poddisruptionbudget.yaml @@ -0,0 +1,20 @@ +{{- if .Values.checksAgent.enabled }} +{{- if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }} +apiVersion: policy/v1 +{{- else }} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodDisruptionBudget +metadata: + name: {{ .Release.Name }}-checks-agent + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} + app.kubernetes.io/component: checks-agent +spec: + maxUnavailable: 1 + selector: + matchLabels: + app.kubernetes.io/component: checks-agent + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "stackstate-k8s-agent.name" . 
}} +{{- end -}} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/checks-agent-serviceaccount.yaml b/charts/stackstate/stackstate-k8s-agent/templates/checks-agent-serviceaccount.yaml new file mode 100644 index 000000000..444aad220 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/checks-agent-serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if .Values.checksAgent.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Release.Name }}-checks-agent + namespace: {{ .Release.Namespace }} + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} + app.kubernetes.io/component: checks-agent +{{- end -}} +{{- with .Values.checksAgent.serviceaccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} +{{- end }} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-clusterrole.yaml b/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-clusterrole.yaml new file mode 100644 index 000000000..6a7b27d18 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-clusterrole.yaml @@ -0,0 +1,106 @@ +{{- $kubeRes := .Values.clusterAgent.collection.kubernetesResources }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "stackstate-k8s-agent.fullname" . }} + labels: +{{ include "stackstate-k8s-agent.labels" . 
| indent 4 }} + app.kubernetes.io/component: cluster-agent +rules: +- apiGroups: + - "" + resources: + - events + - nodes + - pods + - services + {{- if $kubeRes.namespaces }} + - namespaces + {{- end }} + {{- if .Values.clusterAgent.collection.kubernetesMetrics }} + - componentstatuses + {{- end }} + {{- if $kubeRes.configmaps }} + - configmaps + {{- end }} + {{- if $kubeRes.endpoints }} + - endpoints + {{- end }} + {{- if $kubeRes.persistentvolumeclaims }} + - persistentvolumeclaims + {{- end }} + {{- if $kubeRes.persistentvolumes }} + - persistentvolumes + {{- end }} + {{- if $kubeRes.secrets }} + - secrets + {{- end }} + {{- if $kubeRes.resourcequotas }} + - resourcequotas + {{- end }} + verbs: + - get + - list + - watch +{{- if or $kubeRes.daemonsets $kubeRes.deployments $kubeRes.replicasets $kubeRes.statefulsets }} +- apiGroups: + - "apps" + resources: + {{- if $kubeRes.daemonsets }} + - daemonsets + {{- end }} + {{- if $kubeRes.deployments }} + - deployments + {{- end }} + {{- if $kubeRes.replicasets }} + - replicasets + {{- end }} + {{- if $kubeRes.statefulsets }} + - statefulsets + {{- end }} + verbs: + - get + - list + - watch +{{- end}} +{{- if $kubeRes.ingresses }} +- apiGroups: + - "extensions" + - "networking.k8s.io" + resources: + - ingresses + verbs: + - get + - list + - watch +{{- end}} +{{- if or $kubeRes.cronjobs $kubeRes.jobs }} +- apiGroups: + - "batch" + resources: + {{- if $kubeRes.cronjobs }} + - cronjobs + {{- end }} + {{- if $kubeRes.jobs }} + - jobs + {{- end }} + verbs: + - get + - list + - watch +{{- end}} +- nonResourceURLs: + - "/healthz" + - "/version" + verbs: + - get +- apiGroups: + - "storage.k8s.io" + resources: + {{- if $kubeRes.volumeattachments }} + - volumeattachments + {{- end }} + verbs: + - get + - list + - watch diff --git a/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-clusterrolebinding.yaml b/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-clusterrolebinding.yaml new file mode 100644 
index 000000000..0b1bd37ea --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-clusterrolebinding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "stackstate-k8s-agent.fullname" . }} + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} + app.kubernetes.io/component: cluster-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "stackstate-k8s-agent.fullname" . }} +subjects: +- apiGroup: "" + kind: ServiceAccount + name: {{ include "stackstate-k8s-agent.fullname" . }} + namespace: {{ .Release.Namespace }} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-configmap.yaml b/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-configmap.yaml new file mode 100644 index 000000000..89273e11b --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-configmap.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-cluster-agent + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} + app.kubernetes.io/component: cluster-agent +data: + kubernetes_api_events_conf: | + init_config: + instances: + - collect_events: {{ .Values.clusterAgent.collection.kubernetesEvents }} + event_categories:{{ .Values.clusterAgent.config.events.categories | toYaml | nindent 10 }} + kubernetes_api_topology_conf: | + init_config: + instances: + - collection_interval: {{ .Values.clusterAgent.config.topology.collectionInterval }} + resources:{{ .Values.clusterAgent.collection.kubernetesResources | toYaml | nindent 10 }} + {{- if .Values.clusterAgent.collection.kubeStateMetrics.enabled }} + kube_state_metrics_core_conf: | + {{- include "cluster-agent-kube-state-metrics" . 
| nindent 6 }} + {{- end }} +{{- if .Values.clusterAgent.config.override }} +{{- range .Values.clusterAgent.config.override }} + {{ .path | replace "/" "_"}}_{{ .name }}: | +{{ .data | indent 4 -}} +{{- end -}} +{{- end -}} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-deployment.yaml b/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-deployment.yaml new file mode 100644 index 000000000..60c50803a --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-deployment.yaml @@ -0,0 +1,164 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ .Release.Name }}-cluster-agent + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} + app.kubernetes.io/component: cluster-agent +spec: + replicas: {{ .Values.clusterAgent.replicaCount }} + selector: + matchLabels: + app.kubernetes.io/component: cluster-agent + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "stackstate-k8s-agent.name" . }} +{{- with .Values.clusterAgent.strategy }} + strategy: + {{- toYaml . | nindent 4 }} +{{- end }} + template: + metadata: + annotations: + {{- include "stackstate-k8s-agent.checksum-configs" . | nindent 8 }} + {{- include "stackstate-k8s-agent.configmap.override.checksum" . | nindent 8 }} + labels: + app.kubernetes.io/component: cluster-agent + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "stackstate-k8s-agent.name" . }} + spec: + {{- include "stackstate-k8s-agent.image.pullSecrets" (dict "images" (list .Values.clusterAgent.image .Values.all.image) "context" $) | nindent 6 }} + {{- if .Values.clusterAgent.priorityClassName }} + priorityClassName: {{ .Values.clusterAgent.priorityClassName }} + {{- end }} + serviceAccountName: {{ include "stackstate-k8s-agent.fullname" . 
}} + {{- if .Values.all.hardening.enabled}} + terminationGracePeriodSeconds: 240 + {{- end }} + containers: + - name: cluster-agent + image: "{{ include "stackstate-k8s-agent.imageRegistry" . }}/{{ .Values.clusterAgent.image.repository }}:{{ .Values.clusterAgent.image.tag }}" + imagePullPolicy: "{{ .Values.clusterAgent.image.pullPolicy }}" + {{- if .Values.all.hardening.enabled}} + lifecycle: + preStop: + exec: + command: [ "/bin/sh", "-c", "echo 'Giving slim.ai monitor time to submit data...'; sleep 120" ] + {{- end }} + env: + - name: STS_API_KEY + valueFrom: + secretKeyRef: + name: {{ include "stackstate-k8s-agent.fullname" . }} + key: sts-api-key + - name: STS_CLUSTER_AGENT_AUTH_TOKEN + valueFrom: + secretKeyRef: + name: {{ include "stackstate-k8s-agent.fullname" . }} + key: sts-cluster-auth-token + - name: KUBERNETES_HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: STS_HOSTNAME + value: "$(KUBERNETES_HOSTNAME)-{{ .Values.stackstate.cluster.name}}" + - name: LOG_LEVEL + value: {{ .Values.clusterAgent.logLevel | quote }} + {{- if .Values.checksAgent.enabled }} + - name: STS_CLUSTER_CHECKS_ENABLED + value: "true" + - name: STS_EXTRA_CONFIG_PROVIDERS + value: "kube_endpoints kube_services" + - name: STS_EXTRA_LISTENERS + value: "kube_endpoints kube_services" + {{- end }} + - name: STS_CLUSTER_NAME + value: {{.Values.stackstate.cluster.name | quote }} + - name: STS_SKIP_VALIDATE_CLUSTERNAME + value: "true" + - name: STS_COLLECT_KUBERNETES_METRICS + value: {{ .Values.clusterAgent.collection.kubernetesMetrics | quote }} + - name: STS_COLLECT_KUBERNETES_TIMEOUT + value: {{ .Values.clusterAgent.collection.kubernetesTimeout | quote }} + - name: STS_COLLECT_KUBERNETES_TOPOLOGY + value: {{ .Values.clusterAgent.collection.kubernetesTopology | quote }} + - name: STS_LEADER_ELECTION + value: "true" + - name: STS_LOG_LEVEL + value: {{ .Values.clusterAgent.logLevel | quote }} + - name: STS_CLUSTER_AGENT_CMD_PORT + value: {{ 
.Values.clusterAgent.service.targetPort | quote }} + - name: STS_STS_URL + value: {{ include "stackstate-k8s-agent.stackstate.url" . }} + {{- if .Values.clusterAgent.config.configMap.maxDataSize }} + - name: STS_CONFIGMAP_MAX_DATASIZE + value: {{ .Values.clusterAgent.config.configMap.maxDataSize | quote }} + {{- end}} + {{- range $key, $value := .Values.global.extraEnv.open }} + - name: {{ $key }} + value: {{ $value | quote }} + {{- end }} + {{- range $key, $value := .Values.global.extraEnv.secret }} + - name: {{ $key }} + valueFrom: + secretKeyRef: + name: {{ include "stackstate-k8s-agent.fullname" . }} + key: {{ $key }} + {{- end }} + {{- if .Values.all.hardening.enabled}} + securityContext: + privileged: true + runAsUser: 0 # root + capabilities: + add: [ "ALL" ] + readOnlyRootFilesystem: false + {{- else }} + securityContext: + privileged: false + {{- end }} + {{- with .Values.clusterAgent.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + - name: logs + mountPath: /var/log/stackstate-agent + - name: config-override-volume + mountPath: /etc/stackstate-agent/conf.d/kubernetes_api_events.d/conf.yaml + subPath: kubernetes_api_events_conf + - name: config-override-volume + mountPath: /etc/stackstate-agent/conf.d/kubernetes_api_topology.d/conf.yaml + subPath: kubernetes_api_topology_conf + readOnly: true + {{- if .Values.clusterAgent.collection.kubeStateMetrics.enabled }} + - name: config-override-volume + mountPath: /etc/stackstate-agent/conf.d/kubernetes_state_core.d/conf.yaml + subPath: kube_state_metrics_core_conf + readOnly: true + {{- end }} + {{- if .Values.clusterAgent.config.override }} + {{- range .Values.clusterAgent.config.override }} + - name: config-override-volume + mountPath: {{ .path }}/{{ .name }} + subPath: {{ .path | replace "/" "_"}}_{{ .name }} + readOnly: true + {{- end }} + {{- end }} + nodeSelector: + {{ template "label.os" . 
}}: {{ .Values.targetSystem }} + {{- with .Values.clusterAgent.nodeSelector }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.clusterAgent.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.clusterAgent.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: logs + emptyDir: {} + - name: config-override-volume + configMap: + name: {{ .Release.Name }}-cluster-agent diff --git a/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-poddisruptionbudget.yaml b/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-poddisruptionbudget.yaml new file mode 100644 index 000000000..652fa63d9 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-poddisruptionbudget.yaml @@ -0,0 +1,18 @@ +{{- if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }} +apiVersion: policy/v1 +{{- else }} +apiVersion: policy/v1beta1 +{{- end }} +kind: PodDisruptionBudget +metadata: + name: {{ include "stackstate-k8s-agent.fullname" . }} + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} + app.kubernetes.io/component: cluster-agent +spec: + maxUnavailable: 1 + selector: + matchLabels: + app.kubernetes.io/component: cluster-agent + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "stackstate-k8s-agent.name" . }} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-role.yaml b/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-role.yaml new file mode 100644 index 000000000..afe1594c1 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-role.yaml @@ -0,0 +1,18 @@ +{{- $kubeRes := .Values.clusterAgent.collection.kubernetesResources }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "stackstate-k8s-agent.fullname" . }} + labels: +{{ include "stackstate-k8s-agent.labels" . 
| indent 4 }} + app.kubernetes.io/component: cluster-agent +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - get + - patch + - update diff --git a/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-rolebinding.yaml b/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-rolebinding.yaml new file mode 100644 index 000000000..befaa77f2 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-rolebinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "stackstate-k8s-agent.fullname" . }} + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} + app.kubernetes.io/component: cluster-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "stackstate-k8s-agent.fullname" . }} +subjects: +- apiGroup: "" + kind: ServiceAccount + name: {{ include "stackstate-k8s-agent.fullname" . }} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-service.yaml b/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-service.yaml new file mode 100644 index 000000000..93c39aaba --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Release.Name }}-cluster-agent + namespace: {{ .Release.Namespace }} + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} + app.kubernetes.io/component: cluster-agent +spec: + ports: + - name: clusteragent + port: {{int .Values.clusterAgent.service.port }} + protocol: TCP + targetPort: {{int .Values.clusterAgent.service.targetPort }} + selector: + app.kubernetes.io/component: cluster-agent + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "stackstate-k8s-agent.name" . 
}} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-serviceaccount.yaml b/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-serviceaccount.yaml new file mode 100644 index 000000000..ff7b7be35 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/cluster-agent-serviceaccount.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "stackstate-k8s-agent.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} + app.kubernetes.io/component: cluster-agent +{{- with .Values.clusterAgent.serviceaccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} +{{- end }} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/logs-agent-clusterrole.yaml b/charts/stackstate/stackstate-k8s-agent/templates/logs-agent-clusterrole.yaml new file mode 100644 index 000000000..70d70aa47 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/logs-agent-clusterrole.yaml @@ -0,0 +1,20 @@ +{{- if .Values.logsAgent.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Release.Name }}-logs-agent + labels: +{{ include "stackstate-k8s-agent.labels" . 
| indent 4 }} + app.kubernetes.io/component: logs-agent +rules: +- apiGroups: # Kubelet connectivity + - "" + resources: + - nodes + - services + - pods + verbs: + - get + - watch + - list +{{- end -}} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/logs-agent-clusterrolebinding.yaml b/charts/stackstate/stackstate-k8s-agent/templates/logs-agent-clusterrolebinding.yaml new file mode 100644 index 000000000..802c5d8c5 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/logs-agent-clusterrolebinding.yaml @@ -0,0 +1,18 @@ +{{- if .Values.logsAgent.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Release.Name }}-logs-agent + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} + app.kubernetes.io/component: logs-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Release.Name }}-logs-agent +subjects: +- apiGroup: "" + kind: ServiceAccount + name: {{ .Release.Name }}-logs-agent + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/logs-agent-configmap.yaml b/charts/stackstate/stackstate-k8s-agent/templates/logs-agent-configmap.yaml new file mode 100644 index 000000000..c934777ef --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/logs-agent-configmap.yaml @@ -0,0 +1,54 @@ +{{- if .Values.logsAgent.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-logs-agent + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} + app.kubernetes.io/component: logs-agent +data: + promtail.yaml: | + server: + http_listen_port: 9080 + grpc_listen_port: 0 + + clients: + - url: {{ tpl .Values.stackstate.url . 
}}/logs/k8s?api_key=${STS_API_KEY} + external_labels: + sts_cluster_name: {{ .Values.stackstate.cluster.name | quote }} + + positions: + filename: /tmp/positions.yaml + target_config: + sync_period: 10s + scrape_configs: + - job_name: pod-logs + kubernetes_sd_configs: + - role: pod + pipeline_stages: + - docker: {} + - cri: {} + relabel_configs: + - action: replace + source_labels: + - __meta_kubernetes_pod_name + target_label: pod_name + - action: replace + source_labels: + - __meta_kubernetes_pod_uid + target_label: pod_uid + - action: replace + source_labels: + - __meta_kubernetes_pod_container_name + target_label: container_name + # The __path__ is required by the promtail client + - replacement: /var/log/pods/*$1/*.log + separator: / + source_labels: + - __meta_kubernetes_pod_uid + - __meta_kubernetes_pod_container_name + target_label: __path__ + # Drop all remaining labels, we do not need those + - action: drop + regex: __meta_(.*) +{{- end -}} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/logs-agent-daemonset.yaml b/charts/stackstate/stackstate-k8s-agent/templates/logs-agent-daemonset.yaml new file mode 100644 index 000000000..015cdba2a --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/logs-agent-daemonset.yaml @@ -0,0 +1,90 @@ +{{- if .Values.logsAgent.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ .Release.Name }}-logs-agent + namespace: {{ .Release.Namespace }} + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} + app.kubernetes.io/component: logs-agent +spec: + selector: + matchLabels: + app.kubernetes.io/component: logs-agent + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "stackstate-k8s-agent.name" . }} +{{- with .Values.logsAgent.updateStrategy }} + updateStrategy: + {{- toYaml . | nindent 4 }} +{{- end }} + template: + metadata: + annotations: + {{- include "stackstate-k8s-agent.checksum-configs" . 
| nindent 8 }} + {{- include "stackstate-k8s-agent.logsAgent.configmap.override.checksum" . | nindent 8 }} + labels: + app.kubernetes.io/component: logs-agent + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "stackstate-k8s-agent.name" . }} + spec: + {{- include "stackstate-k8s-agent.image.pullSecrets" (dict "images" (list .Values.logsAgent.image .Values.all.image) "context" $) | nindent 6 }} + containers: + - name: logs-agent + image: "{{ include "stackstate-k8s-agent.imageRegistry" . }}/{{ .Values.logsAgent.image.repository }}:{{ .Values.logsAgent.image.tag }}" + args: + - -config.expand-env=true + - -config.file=/etc/promtail/promtail.yaml + imagePullPolicy: "{{ .Values.logsAgent.image.pullPolicy }}" + env: + - name: STS_API_KEY + valueFrom: + secretKeyRef: + name: {{ include "stackstate-k8s-agent.fullname" . }} + key: sts-api-key + - name: "HOSTNAME" # needed when using kubernetes_sd_configs + valueFrom: + fieldRef: + fieldPath: "spec.nodeName" + securityContext: + privileged: false + {{- with .Values.logsAgent.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + - name: logs + mountPath: /var/log + readOnly: true + - name: logs-agent-config + mountPath: /etc/promtail + readOnly: true + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + {{- if .Values.logsAgent.priorityClassName }} + priorityClassName: {{ .Values.logsAgent.priorityClassName }} + {{- end }} + serviceAccountName: {{ .Release.Name }}-logs-agent + {{- with .Values.logsAgent.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.logsAgent.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.logsAgent.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + volumes: + - name: logs + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + - name: logs-agent-config + configMap: + name: {{ .Release.Name }}-logs-agent +{{- end -}} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/logs-agent-serviceaccount.yaml b/charts/stackstate/stackstate-k8s-agent/templates/logs-agent-serviceaccount.yaml new file mode 100644 index 000000000..e562c04e4 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/logs-agent-serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if .Values.logsAgent.enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Release.Name }}-logs-agent + namespace: {{ .Release.Namespace }} + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} + app.kubernetes.io/component: logs-agent +{{- with .Values.logsAgent.serviceaccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} +{{- end }} +{{- end -}} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/node-agent-clusterrole.yaml b/charts/stackstate/stackstate-k8s-agent/templates/node-agent-clusterrole.yaml new file mode 100644 index 000000000..11a53c6ed --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/node-agent-clusterrole.yaml @@ -0,0 +1,18 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Release.Name }}-node-agent + labels: +{{ include "stackstate-k8s-agent.labels" . 
| indent 4 }} + app.kubernetes.io/component: node-agent +rules: +- apiGroups: # Kubelet connectivity + - "" + resources: + - nodes/metrics + - nodes/proxy + - nodes/spec + - endpoints + verbs: + - get + - list diff --git a/charts/stackstate/stackstate-k8s-agent/templates/node-agent-clusterrolebinding.yaml b/charts/stackstate/stackstate-k8s-agent/templates/node-agent-clusterrolebinding.yaml new file mode 100644 index 000000000..8a33cb0bc --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/node-agent-clusterrolebinding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Release.Name }}-node-agent + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} + app.kubernetes.io/component: node-agent +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Release.Name }}-node-agent +subjects: +- apiGroup: "" + kind: ServiceAccount + name: {{ .Release.Name }}-node-agent + namespace: {{ .Release.Namespace }} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/node-agent-configmap.yaml b/charts/stackstate/stackstate-k8s-agent/templates/node-agent-configmap.yaml new file mode 100644 index 000000000..8fdd99258 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/node-agent-configmap.yaml @@ -0,0 +1,14 @@ +{{- if .Values.nodeAgent.config.override }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-node-agent + labels: +{{ include "stackstate-k8s-agent.labels" . 
| indent 4 }} + app.kubernetes.io/component: node-agent +data: +{{- range .Values.nodeAgent.config.override }} + {{ .path | replace "/" "_"}}_{{ .name }}: | +{{ .data | indent 4 -}} +{{- end -}} +{{- end -}} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/node-agent-daemonset.yaml b/charts/stackstate/stackstate-k8s-agent/templates/node-agent-daemonset.yaml new file mode 100644 index 000000000..d10182508 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/node-agent-daemonset.yaml @@ -0,0 +1,101 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ .Release.Name }}-node-agent + namespace: {{ .Release.Namespace }} + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} + app.kubernetes.io/component: node-agent +spec: + selector: + matchLabels: + app.kubernetes.io/component: node-agent + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "stackstate-k8s-agent.name" . }} +{{- with .Values.nodeAgent.updateStrategy }} + updateStrategy: + {{- toYaml . | nindent 4 }} +{{- end }} + template: + metadata: + annotations: + {{- include "stackstate-k8s-agent.checksum-configs" . | nindent 8 }} + {{- include "stackstate-k8s-agent.nodeAgent.configmap.override.checksum" . | nindent 8 }} + labels: + app.kubernetes.io/component: node-agent + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "stackstate-k8s-agent.name" . }} + spec: + {{- include "stackstate-k8s-agent.image.pullSecrets" (dict "images" (list .Values.nodeAgent.containers.agent.image .Values.all.image) "context" $) | nindent 6 }} + {{- if .Values.all.hardening.enabled}} + terminationGracePeriodSeconds: 240 + {{- end }} + containers: + {{- include "container-agent" . | nindent 6 }} + {{- if .Values.nodeAgent.containers.processAgent.enabled }} + {{- include "container-process-agent" . 
| nindent 6 }} + {{- end }} + dnsPolicy: ClusterFirstWithHostNet + hostNetwork: true + hostPID: true + {{- if .Values.nodeAgent.priorityClassName }} + priorityClassName: {{ .Values.nodeAgent.priorityClassName }} + {{- end }} + serviceAccountName: {{ .Release.Name }}-node-agent + nodeSelector: + {{ template "label.os" . }}: {{ .Values.targetSystem }} + {{- with .Values.nodeAgent.nodeSelector }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeAgent.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeAgent.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + {{- if .Values.nodeAgent.containerRuntime.customSocketPath }} + - hostPath: + path: {{ .Values.nodeAgent.containerRuntime.customSocketPath }} + name: customcrisocket + {{- end }} + - hostPath: + path: /var/lib/kubelet + name: kubelet + - hostPath: + path: /var/lib/nfs + name: nfs + - hostPath: + path: /var/lib/docker/overlay2 + name: dockeroverlay2 + - hostPath: + path: /run/docker/netns + name: dockernetns + - hostPath: + path: /var/run/crio/crio.sock + name: crisocket + - hostPath: + path: /var/run/containerd/containerd.sock + name: containerdsocket + - hostPath: + path: /sys/kernel/debug + name: sys-kernel-debug + - hostPath: + path: /var/run/docker.sock + name: dockersocket + - hostPath: + path: {{ .Values.nodeAgent.containerRuntime.hostProc }} + name: procdir + - hostPath: + path: /etc/passwd + name: passwd + - hostPath: + path: /sys/fs/cgroup + name: cgroups + {{- if .Values.nodeAgent.config.override }} + - name: config-override-volume + configMap: + name: {{ .Release.Name }}-node-agent + {{- end }} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/node-agent-scc.yaml b/charts/stackstate/stackstate-k8s-agent/templates/node-agent-scc.yaml new file mode 100644 index 000000000..562a099c7 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/node-agent-scc.yaml @@ -0,0 +1,56 @@ +{{- if 
.Values.nodeAgent.scc.enabled }} +allowHostDirVolumePlugin: true +# was true +allowHostIPC: true +# was true +allowHostNetwork: true +# Allow host PID for dogstatsd origin detection +allowHostPID: true +# Allow host ports for dsd / trace / logs intake +allowHostPorts: true +allowPrivilegeEscalation: true +# was true +allowPrivilegedContainer: true +# was - '*' +allowedCapabilities: [] +allowedUnsafeSysctls: +- '*' +apiVersion: security.openshift.io/v1 +defaultAddCapabilities: null +fsGroup: +# was RunAsAny + type: MustRunAs +groups: [] +kind: SecurityContextConstraints +metadata: + name: {{ .Release.Name }}-node-agent + namespace: {{ .Release.Namespace }} +priority: null +readOnlyRootFilesystem: false +requiredDropCapabilities: null +# was RunAsAny +runAsUser: + type: MustRunAsRange +# Use the `spc_t` selinux type to access the +# docker socket + proc and cgroup stats +seLinuxContext: + type: RunAsAny + seLinuxOptions: + user: "system_u" + role: "system_r" + type: "spc_t" + level: "s0" +# was - '*' +seccompProfiles: [] +supplementalGroups: + type: RunAsAny +users: +- system:serviceaccount:{{ .Release.Namespace }}:{{ .Release.Name }}-node-agent +# Allow hostPath for docker / process metrics +volumes: + - configMap + - downwardAPI + - emptyDir + - hostPath + - secret +{{- end }} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/node-agent-service.yaml b/charts/stackstate/stackstate-k8s-agent/templates/node-agent-service.yaml new file mode 100644 index 000000000..ad5ad71ce --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/node-agent-service.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ .Release.Name }}-node-agent + namespace: {{ .Release.Namespace }} + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} + app.kubernetes.io/component: node-agent +{{- with .Values.nodeAgent.service.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} +{{- end }} +spec: + type: {{ .Values.nodeAgent.service.type }} +{{- if eq .Values.nodeAgent.service.type "LoadBalancer" }} + loadBalancerSourceRanges: {{ toYaml .Values.nodeAgent.service.loadBalancerSourceRanges | nindent 4}} +{{- end }} + ports: + - name: traceport + port: 8126 + protocol: TCP + targetPort: 8126 + selector: + app.kubernetes.io/component: node-agent + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/name: {{ include "stackstate-k8s-agent.name" . }} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/node-agent-serviceaccount.yaml b/charts/stackstate/stackstate-k8s-agent/templates/node-agent-serviceaccount.yaml new file mode 100644 index 000000000..935fa9674 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/node-agent-serviceaccount.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Release.Name }}-node-agent + namespace: {{ .Release.Namespace }} + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} + app.kubernetes.io/component: node-agent +{{- with .Values.nodeAgent.serviceaccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} +{{- end }} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/openshift-logging-secret.yaml b/charts/stackstate/stackstate-k8s-agent/templates/openshift-logging-secret.yaml new file mode 100644 index 000000000..df813afe2 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/openshift-logging-secret.yaml @@ -0,0 +1,17 @@ +{{- if .Values.openShiftLogging.installSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "stackstate-k8s-agent.fullname" . }}-logging-secret + namespace: openshift-logging + labels: +{{ include "stackstate-k8s-agent.labels" . 
| indent 4 }} +type: Opaque +data: + username: {{ "apikey" | b64enc | quote }} +{{- if .Values.global.receiverApiKey }} + password: {{ .Values.global.receiverApiKey | b64enc | quote }} +{{- else }} + password: {{ .Values.stackstate.apiKey | b64enc | quote }} +{{- end }} +{{- end }} diff --git a/charts/stackstate/stackstate-k8s-agent/templates/pull-secret.yaml b/charts/stackstate/stackstate-k8s-agent/templates/pull-secret.yaml new file mode 100644 index 000000000..441a42a15 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/pull-secret.yaml @@ -0,0 +1,35 @@ +{{- $defaultRegistry := .Values.global.imageRegistry }} +{{- $top := . }} +{{- $registryAuthMap := dict }} + +{{- range $registry, $credentials := .Values.global.imagePullCredentials }} + {{- $registryAuthDocument := dict -}} + {{- $_ := set $registryAuthDocument "username" $credentials.username }} + {{- $_ := set $registryAuthDocument "password" $credentials.password }} + {{- $authMessage := printf "%s:%s" $registryAuthDocument.username $registryAuthDocument.password | b64enc }} + {{- $_ := set $registryAuthDocument "auth" $authMessage }} + {{- if eq $registry "default" }} + {{- $registryAuthMap := set $registryAuthMap (include "stackstate-k8s-agent.imageRegistry" $top) $registryAuthDocument }} + {{ else }} + {{- $registryAuthMap := set $registryAuthMap $registry $registryAuthDocument }} + {{- end }} +{{- end }} + +{{- if .Values.all.image.pullSecretUsername }} + {{- $registryAuthDocument := dict -}} + {{- $_ := set $registryAuthDocument "username" .Values.all.image.pullSecretUsername }} + {{- $_ := set $registryAuthDocument "password" .Values.all.image.pullSecretPassword }} + {{- $authMessage := printf "%s:%s" $registryAuthDocument.username $registryAuthDocument.password | b64enc }} + {{- $_ := set $registryAuthDocument "auth" $authMessage }} + {{- $registryAuthMap := set $registryAuthMap (include "stackstate-k8s-agent.imageRegistry" $top) $registryAuthDocument }} +{{- end }} + +{{- 
$dockerAuthsDocuments := dict "auths" $registryAuthMap }} + +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "stackstate-k8s-agent.pull-secret.name" . }} +data: + .dockerconfigjson: {{ $dockerAuthsDocuments | toJson | b64enc | quote }} +type: kubernetes.io/dockerconfigjson diff --git a/charts/stackstate/stackstate-k8s-agent/templates/secret.yaml b/charts/stackstate/stackstate-k8s-agent/templates/secret.yaml new file mode 100644 index 000000000..31057ccf3 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/templates/secret.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "stackstate-k8s-agent.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: +{{ include "stackstate-k8s-agent.labels" . | indent 4 }} +type: Opaque +data: +{{- if .Values.global.receiverApiKey }} + sts-api-key: {{ .Values.global.receiverApiKey | b64enc | quote }} +{{- else }} + sts-api-key: {{ .Values.stackstate.apiKey | b64enc | quote }} +{{- end }} +{{- if .Values.stackstate.cluster.authToken }} + sts-cluster-auth-token: {{ .Values.stackstate.cluster.authToken | b64enc | quote }} +{{- else }} + sts-cluster-auth-token: {{ randAlphaNum 32 | b64enc | quote }} +{{- end }} +{{- range $key, $value := .Values.global.extraEnv.secret }} + {{ $key }}: {{ $value | b64enc | quote }} +{{- end }} diff --git a/charts/stackstate/stackstate-k8s-agent/test/clusteragent_resources_test.go b/charts/stackstate/stackstate-k8s-agent/test/clusteragent_resources_test.go new file mode 100644 index 000000000..25875e871 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/test/clusteragent_resources_test.go @@ -0,0 +1,145 @@ +package test + +import ( + "regexp" + "strings" + "testing" + + v1 "k8s.io/api/rbac/v1" + + "github.com/stretchr/testify/assert" + "gitlab.com/StackVista/DevOps/helm-charts/helmtestutil" +) + +var requiredRules = []string{ + "events+get,list,watch", + "nodes+get,list,watch", + "pods+get,list,watch", + "services+get,list,watch", + 
"configmaps+create,get,patch,update", +} + +var optionalRules = []string{ + "namespaces+get,list,watch", + "componentstatuses+get,list,watch", + "configmaps+list,watch", // get is already required + "endpoints+get,list,watch", + "persistentvolumeclaims+get,list,watch", + "persistentvolumes+get,list,watch", + "secrets+get,list,watch", + "apps/daemonsets+get,list,watch", + "apps/deployments+get,list,watch", + "apps/replicasets+get,list,watch", + "apps/statefulsets+get,list,watch", + "extensions/ingresses+get,list,watch", + "batch/cronjobs+get,list,watch", + "batch/jobs+get,list,watch", +} + +var roleDescriptionRegexp = regexp.MustCompile(`^((?P\w+)/)?(?P\w+)\+(?P[\w,]+)`) + +type Rule struct { + Group string + ResourceName string + Verb string +} + +func assertRuleExistence(t *testing.T, rules []v1.PolicyRule, roleDescription string, shouldBePresent bool) { + match := roleDescriptionRegexp.FindStringSubmatch(roleDescription) + assert.NotNil(t, match) + + var roleRules []Rule + for _, rule := range rules { + for _, group := range rule.APIGroups { + for _, resource := range rule.Resources { + for _, verb := range rule.Verbs { + roleRules = append(roleRules, Rule{group, resource, verb}) + } + } + } + } + + resGroup := match[roleDescriptionRegexp.SubexpIndex("group")] + resName := match[roleDescriptionRegexp.SubexpIndex("name")] + verbs := strings.Split(match[roleDescriptionRegexp.SubexpIndex("verbs")], ",") + + for _, verb := range verbs { + requiredRule := Rule{resGroup, resName, verb} + found := false + for _, rule := range roleRules { + if rule == requiredRule { + found = true + break + } + } + if shouldBePresent { + assert.Truef(t, found, "Rule %v has not been found", requiredRule) + } else { + assert.Falsef(t, found, "Rule %v should not be present", requiredRule) + } + } +} + +func TestAllResourcesAreEnabled(t *testing.T) { + output := helmtestutil.RenderHelmTemplate(t, "stackstate-k8s-agent", "values/minimal.yaml") + resources := 
helmtestutil.NewKubernetesResources(t, output) + + assert.Contains(t, resources.ClusterRoles, "stackstate-k8s-agent") + assert.Contains(t, resources.Roles, "stackstate-k8s-agent") + rules := resources.ClusterRoles["stackstate-k8s-agent"].Rules + rules = append(rules, resources.Roles["stackstate-k8s-agent"].Rules...) + + for _, requiredRole := range requiredRules { + assertRuleExistence(t, rules, requiredRole, true) + } + // be default, everything is enabled, so all the optional roles should be present as well + for _, optionalRule := range optionalRules { + assertRuleExistence(t, rules, optionalRule, true) + } +} + +func TestMostOfResourcesAreDisabled(t *testing.T) { + output := helmtestutil.RenderHelmTemplate(t, "stackstate-k8s-agent", "values/minimal.yaml", "values/disable-all-resource.yaml") + resources := helmtestutil.NewKubernetesResources(t, output) + + assert.Contains(t, resources.ClusterRoles, "stackstate-k8s-agent") + assert.Contains(t, resources.Roles, "stackstate-k8s-agent") + rules := resources.ClusterRoles["stackstate-k8s-agent"].Rules + rules = append(rules, resources.Roles["stackstate-k8s-agent"].Rules...) 
+ + for _, requiredRole := range requiredRules { + assertRuleExistence(t, rules, requiredRole, true) + } + + // we expect all optional resources to be removed from ClusterRole with the given values + for _, optionalRule := range optionalRules { + assertRuleExistence(t, rules, optionalRule, false) + } +} + +func TestNoClusterWideModificationRights(t *testing.T) { + output := helmtestutil.RenderHelmTemplate(t, "stackstate-k8s-agent", "values/minimal.yaml", "values/http-header-injector.yaml") + resources := helmtestutil.NewKubernetesResources(t, output) + assert.Contains(t, resources.ClusterRoles, "stackstate-k8s-agent") + illegalVerbs := []string{"create", "patch", "update", "delete"} + + for _, clusterRole := range resources.ClusterRoles { + for _, rule := range clusterRole.Rules { + for _, verb := range rule.Verbs { + assert.NotContains(t, illegalVerbs, verb, "ClusterRole %s should not have %s verb for %s resource", clusterRole.Name, verb, rule.Resources) + } + } + } +} + +func TestServicePortChange(t *testing.T) { + output := helmtestutil.RenderHelmTemplate(t, "stackstate-k8s-agent", "values/minimal.yaml", "values/clustercheck_service_port_override.yaml") + resources := helmtestutil.NewKubernetesResources(t, output) + + cluster_agent_service := resources.Services["stackstate-k8s-agent-cluster-agent"] + + port := cluster_agent_service.Spec.Ports[0] + assert.Equal(t, port.Name, "clusteragent") + assert.Equal(t, port.Port, int32(8008)) + assert.Equal(t, port.TargetPort.IntVal, int32(9009)) +} diff --git a/charts/stackstate/stackstate-k8s-agent/test/clustername_test.go b/charts/stackstate/stackstate-k8s-agent/test/clustername_test.go new file mode 100644 index 000000000..55090b995 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/test/clustername_test.go @@ -0,0 +1,54 @@ +package test + +import ( + "testing" + + "github.com/gruntwork-io/terratest/modules/helm" + "github.com/stretchr/testify/assert" + + "gitlab.com/StackVista/DevOps/helm-charts/helmtestutil" 
+) + +func TestHelmBasicRender(t *testing.T) { + output := helmtestutil.RenderHelmTemplate(t, "stackstate-k8s-agent", "values/minimal.yaml") + + // Parse all resources into their corresponding types for validation and further inspection + helmtestutil.NewKubernetesResources(t, output) +} + +func TestClusterNameValidation(t *testing.T) { + testCases := []struct { + Name string + ClusterName string + IsValid bool + }{ + {"not allowed end with special character [.]", "name.", false}, + {"not allowed end with special character [-]", "name.", false}, + {"not allowed start with special character [-]", "-name", false}, + {"not allowed start with special character [.]", ".name", false}, + {"upper case is not allowed", "Euwest1-prod.cool-company.com", false}, + {"upper case is not allowed", "euwest1-PROD.cool-company.com", false}, + {"upper case is not allowed", "euwest1-prod.cool-company.coM", false}, + {"dots and dashes are allowed in the middle", "euwest1-prod.cool-company.com", true}, + {"underscore is not allowed", "why_7", false}, + } + + for _, testCase := range testCases { + t.Run(testCase.Name, func(t *testing.T) { + output, err := helmtestutil.RenderHelmTemplateOpts( + t, "cluster-agent", + &helm.Options{ + ValuesFiles: []string{"values/minimal.yaml"}, + SetStrValues: map[string]string{ + "stackstate.cluster.name": testCase.ClusterName, + }, + }) + if testCase.IsValid { + assert.Nil(t, err) + } else { + assert.NotNil(t, err) + assert.Contains(t, output, "stackstate.cluster.name: Does not match pattern") + } + }) + } +} diff --git a/charts/stackstate/stackstate-k8s-agent/test/values/clustercheck_ksm_custom_url.yaml b/charts/stackstate/stackstate-k8s-agent/test/values/clustercheck_ksm_custom_url.yaml new file mode 100644 index 000000000..57b973eed --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/test/values/clustercheck_ksm_custom_url.yaml @@ -0,0 +1,7 @@ +checksAgent: + enabled: true + kubeStateMetrics: + url: 
http://my-custom-ksm-url.monitoring.svc.local:8080/metrics +dependencies: + kubeStateMetrics: + enabled: true diff --git a/charts/stackstate/stackstate-k8s-agent/test/values/clustercheck_ksm_no_override.yaml b/charts/stackstate/stackstate-k8s-agent/test/values/clustercheck_ksm_no_override.yaml new file mode 100644 index 000000000..b6c817d47 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/test/values/clustercheck_ksm_no_override.yaml @@ -0,0 +1,5 @@ +checksAgent: + enabled: true +dependencies: + kubeStateMetrics: + enabled: true diff --git a/charts/stackstate/stackstate-k8s-agent/test/values/clustercheck_ksm_override.yaml b/charts/stackstate/stackstate-k8s-agent/test/values/clustercheck_ksm_override.yaml new file mode 100644 index 000000000..9ca201345 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/test/values/clustercheck_ksm_override.yaml @@ -0,0 +1,26 @@ +checksAgent: + enabled: true +dependencies: + kubeStateMetrics: + enabled: true +agent: + config: + override: +# agent.config.override -- Disables kubernetes_state check on regular agent pods. + - name: auto_conf.yaml + path: /etc/stackstate-agent/conf.d/kubernetes_state.d + data: | +clusterAgent: + config: + override: +# clusterAgent.config.override -- Defines kubernetes_state check for clusterchecks agents. Auto-discovery +# with ad_identifiers does not work here. Use a specific URL instead. 
+ - name: conf.yaml + path: /etc/stackstate-agent/conf.d/kubernetes_state.d + data: | + cluster_check: true + + init_config: + + instances: + - kube_state_url: http://YOUR_KUBE_STATE_METRICS_SERVICE_NAME:8080/metrics diff --git a/charts/stackstate/stackstate-k8s-agent/test/values/clustercheck_no_ksm_custom_url.yaml b/charts/stackstate/stackstate-k8s-agent/test/values/clustercheck_no_ksm_custom_url.yaml new file mode 100644 index 000000000..a62691878 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/test/values/clustercheck_no_ksm_custom_url.yaml @@ -0,0 +1,7 @@ +checksAgent: + enabled: true + kubeStateMetrics: + url: http://my-custom-ksm-url.monitoring.svc.local:8080/metrics +dependencies: + kubeStateMetrics: + enabled: false diff --git a/charts/stackstate/stackstate-k8s-agent/test/values/clustercheck_service_port_override.yaml b/charts/stackstate/stackstate-k8s-agent/test/values/clustercheck_service_port_override.yaml new file mode 100644 index 000000000..c01a98fcb --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/test/values/clustercheck_service_port_override.yaml @@ -0,0 +1,4 @@ +clusterAgent: + service: + port: 8008 + targetPort: 9009 diff --git a/charts/stackstate/stackstate-k8s-agent/test/values/disable-all-resource.yaml b/charts/stackstate/stackstate-k8s-agent/test/values/disable-all-resource.yaml new file mode 100644 index 000000000..cd33e843e --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/test/values/disable-all-resource.yaml @@ -0,0 +1,17 @@ +clusterAgent: + collection: + kubernetesMetrics: false + kubernetesResources: + namespaces: false + configmaps: false + endpoints: false + persistentvolumes: false + persistentvolumeclaims: false + secrets: false + daemonsets: false + deployments: false + replicasets: false + statefulsets: false + ingresses: false + cronjobs: false + jobs: false diff --git a/charts/stackstate/stackstate-k8s-agent/test/values/http-header-injector.yaml 
b/charts/stackstate/stackstate-k8s-agent/test/values/http-header-injector.yaml new file mode 100644 index 000000000..c9392ce2d --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/test/values/http-header-injector.yaml @@ -0,0 +1,8 @@ +httpHeaderInjectorWebhook: + webhook: + tls: + mode: "provided" + provided: + caBundle: insert-ca-here + crt: insert-cert-here + key: insert-key-here diff --git a/charts/stackstate/stackstate-k8s-agent/test/values/minimal.yaml b/charts/stackstate/stackstate-k8s-agent/test/values/minimal.yaml new file mode 100644 index 000000000..b310c9a09 --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/test/values/minimal.yaml @@ -0,0 +1,7 @@ +stackstate: + apiKey: foobar + cluster: + name: some-k8s-cluster + token: some-token + + url: https://stackstate:7000/receiver diff --git a/charts/stackstate/stackstate-k8s-agent/values.schema.json b/charts/stackstate/stackstate-k8s-agent/values.schema.json new file mode 100644 index 000000000..2b977af3d --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/values.schema.json @@ -0,0 +1,79 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://stackstate.io/example.json", + "type": "object", + "default": {}, + "title": "StackState Agent Helm chart values", + "required": [ + "stackstate", + "clusterAgent" + ], + "properties": { + "stackstate": { + "type": "object", + "required": [ + "apiKey", + "cluster", + "url" + ], + "properties": { + "apiKey": { + "type": "string" + }, + "cluster": { + "type": "object", + "required": ["name"], + "properties": { + "name": { + "type": "string", + "pattern": "^[a-z0-9]([a-z0-9\\-\\.]*[a-z0-9])$" + }, + "authToken": { + "type": "string" + } + } + }, + "url": { + "type": "string" + } + } + }, + "clusterAgent": { + "type": "object", + "required": [ + "config" + ], + "properties": { + "config": { + "type": "object", + "required": [ + "events" + ], + "properties": { + "events": { + "type": "object", + "properties": { + "categories": 
{ + "type": "object", + "patternProperties": { + ".*": { + "type": [ + "string" + ], + "enum": [ + "Alerts", + "Activities", + "Changes", + "Others" + ] + } + } + } + } + } + } + } + } + } + } +} diff --git a/charts/stackstate/stackstate-k8s-agent/values.yaml b/charts/stackstate/stackstate-k8s-agent/values.yaml new file mode 100644 index 000000000..8dc0cb14f --- /dev/null +++ b/charts/stackstate/stackstate-k8s-agent/values.yaml @@ -0,0 +1,545 @@ +##################### +# General variables # +##################### + +global: + extraEnv: + # global.extraEnv.open -- Extra open environment variables to inject into pods. + open: {} + # global.extraEnv.secret -- Extra secret environment variables to inject into pods via a `Secret` object. + secret: {} + # global.imagePullSecrets -- Secrets / credentials needed for container image registry. + imagePullSecrets: [] + # global.imagePullCredentials -- Globally define credentials for pulling images. + imagePullCredentials: {} + +# nameOverride -- Override the name of the chart. +nameOverride: "" +# fullnameOverride -- Override the fullname of the chart. +fullnameOverride: "" + +# targetSystem -- Target OS for this deployment (possible values: linux) +targetSystem: "linux" + +all: + image: + # all.image.registry -- The image registry to use. + registry: "quay.io" + hardening: + # all.hardening.enabled -- An indication of whether the containers will be evaluated for hardening at runtime + enabled: false + +nodeAgent: + containerRuntime: + # nodeAgent.containerRuntime.customSocketPath -- If the container socket path does not match the default for CRI-O, Containerd or Docker, supply a custom socket path. 
+ customSocketPath: "" + # nodeAgent.containerRuntime.customHostProc -- If the container is launched from a place where /proc is mounted differently, /proc can be changed + hostProc: /proc + + scc: + # nodeAgent.scc.enabled -- Enable / disable the installation of the SecurityContextConfiguration needed for installation on OpenShift. + enabled: false + apm: + # nodeAgent.apm.enabled -- Enable / disable the nodeAgent APM module. + enabled: true + networkTracing: + # nodeAgent.networkTracing.enabled -- Enable / disable the nodeAgent network tracing module. + enabled: true + protocolInspection: + # nodeAgent.protocolInspection.enabled -- Enable / disable the nodeAgent protocol inspection. + enabled: true + httpTracing: + enabled: true + # nodeAgent.skipSslValidation -- Set to true if self signed certificates are used. + skipSslValidation: false + # nodeAgent.skipKubeletTLSVerify -- Set to true if you want to skip kubelet tls verification. + skipKubeletTLSVerify: false + + # nodeAgent.checksTagCardinality -- low, orchestrator or high. Orchestrator level adds pod_name, high adds display_container_name + checksTagCardinality: orchestrator + + # nodeAgent.config -- + config: + # nodeAgent.config.override -- A list of objects containing three keys `name`, `path` and `data`, specifying filenames at specific paths which need to be (potentially) overridden using a mounted configmap + override: [] + + # nodeAgent.priorityClassName -- Priority class for nodeAgent pods. + priorityClassName: "" + + containers: + + agent: + image: + # nodeAgent.containers.agent.image.repository -- Base container image repository. + repository: stackstate/stackstate-k8s-agent + # nodeAgent.containers.agent.image.tag -- Default container image tag. + tag: "e36d1c88" + # nodeAgent.containers.agent.image.pullPolicy -- Default container image pull policy. + pullPolicy: IfNotPresent + processAgent: + # nodeAgent.containers.agent.processAgent.enabled -- Enable / disable the agent process agent module. 
- deprecated + enabled: false + # nodeAgent.containers.agent.env -- Additional environment variables for the agent container + env: {} + # nodeAgent.containers.agent.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off + ## If not set, fall back to the value of agent.logLevel. + logLevel: # INFO + + resources: + limits: + # nodeAgent.containers.agent.resources.limits.cpu -- CPU resource limits. + cpu: "270m" + # nodeAgent.containers.agent.resources.limits.cpu -- Memory resource limits. + memory: "420Mi" + requests: + # nodeAgent.containers.agent.resources.requests.cpu -- CPU resource requests. + cpu: "20m" + # nodeAgent.containers.agent.resources.requests.cpu -- Memory resource requests. + memory: "180Mi" + livenessProbe: + # nodeAgent.containers.agent.livenessProbe.enabled -- Enable use of livenessProbe check. + enabled: true + # nodeAgent.containers.agent.livenessProbe.failureThreshold -- `failureThreshold` for the liveness probe. + failureThreshold: 3 + # nodeAgent.containers.agent.livenessProbe.initialDelaySeconds -- `initialDelaySeconds` for the liveness probe. + initialDelaySeconds: 15 + # nodeAgent.containers.agent.livenessProbe.periodSeconds -- `periodSeconds` for the liveness probe. + periodSeconds: 15 + # nodeAgent.containers.agent.livenessProbe.successThreshold -- `successThreshold` for the liveness probe. + successThreshold: 1 + # nodeAgent.containers.agent.livenessProbe.timeoutSeconds -- `timeoutSeconds` for the liveness probe. + timeoutSeconds: 5 + readinessProbe: + # nodeAgent.containers.agent.readinessProbe.enabled -- Enable use of readinessProbe check. + enabled: true + # nodeAgent.containers.agent.readinessProbe.failureThreshold -- `failureThreshold` for the readiness probe. + failureThreshold: 3 + # nodeAgent.containers.agent.readinessProbe.initialDelaySeconds -- `initialDelaySeconds` for the readiness probe. 
+ initialDelaySeconds: 15 + # nodeAgent.containers.agent.readinessProbe.periodSeconds -- `periodSeconds` for the readiness probe. + periodSeconds: 15 + # nodeAgent.containers.agent.readinessProbe.successThreshold -- `successThreshold` for the readiness probe. + successThreshold: 1 + # nodeAgent.containers.agent.readinessProbe.timeoutSeconds -- `timeoutSeconds` for the readiness probe. + timeoutSeconds: 5 + + processAgent: + # nodeAgent.containers.processAgent.enabled -- Enable / disable the process agent container. + enabled: true + image: + # Override to pull the image from an alternate registry + registry: + # nodeAgent.containers.processAgent.image.repository -- Process-agent container image repository. + repository: stackstate/stackstate-k8s-process-agent + # nodeAgent.containers.processAgent.image.tag -- Default process-agent container image tag. + tag: "c9dbfd73" + # nodeAgent.containers.processAgent.image.pullPolicy -- Process-agent container image pull policy. + pullPolicy: IfNotPresent + # nodeAgent.containers.processAgent.env -- Additional environment variables for the process-agent container + env: {} + # nodeAgent.containers.processAgent.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off + ## If not set, fall back to the value of agent.logLevel. + logLevel: # INFO + + resources: + limits: + # nodeAgent.containers.processAgent.resources.limits.cpu -- CPU resource limits. + cpu: "125m" + # nodeAgent.containers.processAgent.resources.limits.cpu -- Memory resource limits. + memory: "400Mi" + requests: + # nodeAgent.containers.processAgent.resources.requests.cpu -- CPU resource requests. + cpu: "25m" + # nodeAgent.containers.processAgent.resources.requests.cpu -- Memory resource requests. 
+ memory: "128Mi" + # nodeAgent.service -- The Kubernetes service for the agent + service: + # nodeAgent.service.type -- Type of Kubernetes service: ClusterIP, LoadBalancer, NodePort + type: ClusterIP + # nodeAgent.service.annotations -- Annotations for the service + annotations: {} + # nodeAgent.service.loadBalancerSourceRanges -- The IP4 CIDR allowed to reach LoadBalancer for the service. For LoadBalancer type of service only. + loadBalancerSourceRanges: ["10.0.0.0/8"] + + # nodeAgent.logLevel -- Logging level for agent processes. + logLevel: INFO + + # nodeAgent.updateStrategy -- The update strategy for the DaemonSet object. + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 100 + + # nodeAgent.nodeSelector -- Node labels for pod assignment. + nodeSelector: {} + + # nodeAgent.tolerations -- Toleration labels for pod assignment. + tolerations: [] + + # nodeAgent.affinity -- Affinity settings for pod assignment. + affinity: {} + + serviceaccount: + # nodeAgent.serviceaccount.annotations -- Annotations for the service account for the agent daemonset pods + annotations: {} + +processAgent: + checkIntervals: + # processAgent.checkIntervals.container -- Override the default value of the container check interval in seconds. + container: 30 + # processAgent.checkIntervals.connections -- Override the default value of the connections check interval in seconds. + connections: 30 + # processAgent.checkIntervals.process -- Override the default value of the process check interval in seconds. + process: 30 + +clusterAgent: + collection: + # clusterAgent.collection.kubernetesEvents -- Enable / disable the cluster agent events collection. + kubernetesEvents: true + # clusterAgent.collection.kubernetesMetrics -- Enable / disable the cluster agent metrics collection. + kubernetesMetrics: true + # clusterAgent.collection.kubernetesTimeout -- Default timeout (in seconds) when obtaining information from the Kubernetes API. 
+ kubernetesTimeout: 10 + # clusterAgent.collection.kubernetesTopology -- Enable / disable the cluster agent topology collection. + kubernetesTopology: true + kubeStateMetrics: + # clusterAgent.collection.kubeStateMetrics.enabled -- Enable / disable the cluster agent kube-state-metrics collection. + enabled: true + # clusterAgent.collection.kubeStateMetrics.clusterCheck -- For large clusters where the Kubernetes State Metrics Check Core needs to be distributed on dedicated workers. + clusterCheck: false + # clusterAgent.collection.kubeStateMetrics.labelsAsTags -- Extra labels to collect from resources and to turn into StackState tag. + ## It has the following structure: + ## labelsAsTags: + ## : # can be pod, deployment, node, etc. + ## : # where is the kubernetes label and is the StackState tag + ## : + ## : + ## : + ## + ## Warning: the label must match the transformation done by kube-state-metrics, + ## for example tags.stackstate/version becomes tags_stackstate_version. + labelsAsTags: {} + # pod: + # app: app + # node: + # zone: zone + # team: team + + # clusterAgent.collection.kubeStateMetrics.annotationsAsTags -- Extra annotations to collect from resources and to turn into StackState tag. + + ## It has the following structure: + ## annotationsAsTags: + ## : # can be pod, deployment, node, etc. + ## : # where is the kubernetes annotation and is the StackState tag + ## : + ## : + ## : + ## + ## Warning: the annotation must match the transformation done by kube-state-metrics, + ## for example tags.stackstate/version becomes tags_stackstate_version. + annotationsAsTags: {} + kubernetesResources: + # clusterAgent.collection.kubernetesResources.volumeattachments -- Enable / disable collection of Volume Attachments. Used to bind Nodes to Persistent Volumes. + volumeattachments: true + # clusterAgent.collection.kubernetesResources.namespaces -- Enable / disable collection of Namespaces. 
+ namespaces: true + # clusterAgent.collection.kubernetesResources.configmaps -- Enable / disable collection of ConfigMaps. + configmaps: true + # clusterAgent.collection.kubernetesResources.endpoints -- Enable / disable collection of Endpoints. If endpoints are disabled then StackState won't be able to connect a Service to Pods that serving it + endpoints: true + # clusterAgent.collection.kubernetesResources.persistentvolumes -- Enable / disable collection of PersistentVolumes. + persistentvolumes: true + # clusterAgent.collection.kubernetesResources.persistentvolumeclaims -- Enable / disable collection of PersistentVolumeClaims. Disabling these will not let StackState connect PersistentVolumes to pods they are attached to + persistentvolumeclaims: true + # clusterAgent.collection.kubernetesResources.secrets -- Enable / disable collection of Secrets. + secrets: true + # clusterAgent.collection.kubernetesResources.daemonsets -- Enable / disable collection of DaemonSets. + daemonsets: true + # clusterAgent.collection.kubernetesResources.deployments -- Enable / disable collection of Deployments. + deployments: true + # clusterAgent.collection.kubernetesResources.replicasets -- Enable / disable collection of ReplicaSets. + replicasets: true + # clusterAgent.collection.kubernetesResources.statefulsets -- Enable / disable collection of StatefulSets. + statefulsets: true + # clusterAgent.collection.kubernetesResources.ingresses -- Enable / disable collection of Ingresses. + ingresses: true + # clusterAgent.collection.kubernetesResources.cronjobs -- Enable / disable collection of CronJobs. + cronjobs: true + # clusterAgent.collection.kubernetesResources.jobs -- Enable / disable collection of Jobs. + jobs: true + # clusterAgent.collection.kubernetesResources.resourcequotas -- Enable / disable collection of ResourceQuotas. 
+ resourcequotas: true + + # clusterAgent.config -- + config: + events: + # clusterAgent.config.events.categories -- Custom mapping from Kubernetes event reason to StackState event category. Categories allowed: Alerts, Activities, Changes, Others + categories: {} + topology: + # clusterAgent.config.topology.collectionInterval -- Interval for running topology collection, in seconds + collectionInterval: 90 + configMap: + # clusterAgent.config.configMap.maxDataSize -- Maximum amount of characters for the data property of a ConfigMap collected by the kubernetes topology check + maxDataSize: + # clusterAgent.config.override -- A list of objects containing three keys `name`, `path` and `data`, specifying filenames at specific paths which need to be (potentially) overridden using a mounted configmap + override: [] + + service: + # clusterAgent.service.port -- Change the Cluster Agent service port + port: 5005 + # clusterAgent.service.targetPort -- Change the Cluster Agent service targetPort + targetPort: 5005 + + # clusterAgent.enabled -- Enable / disable the cluster agent. + enabled: true + + image: + # clusterAgent.image.repository -- Base container image repository. + repository: stackstate/stackstate-k8s-cluster-agent + # clusterAgent.image.tag -- Default container image tag. + tag: "e36d1c88" + # clusterAgent.image.pullPolicy -- Default container image pull policy. + pullPolicy: IfNotPresent + + livenessProbe: + # clusterAgent.livenessProbe.enabled -- Enable use of livenessProbe check. + enabled: true + # clusterAgent.livenessProbe.failureThreshold -- `failureThreshold` for the liveness probe. + failureThreshold: 3 + # clusterAgent.livenessProbe.initialDelaySeconds -- `initialDelaySeconds` for the liveness probe. + initialDelaySeconds: 15 + # clusterAgent.livenessProbe.periodSeconds -- `periodSeconds` for the liveness probe. + periodSeconds: 15 + # clusterAgent.livenessProbe.successThreshold -- `successThreshold` for the liveness probe. 
+ successThreshold: 1 + # clusterAgent.livenessProbe.timeoutSeconds -- `timeoutSeconds` for the liveness probe. + timeoutSeconds: 5 + + # clusterAgent.logLevel -- Logging level for stackstate-k8s-agent processes. + logLevel: INFO + + # clusterAgent.priorityClassName -- Priority class for stackstate-k8s-agent pods. + priorityClassName: "" + + readinessProbe: + # clusterAgent.readinessProbe.enabled -- Enable use of readinessProbe check. + enabled: true + # clusterAgent.readinessProbe.failureThreshold -- `failureThreshold` for the readiness probe. + failureThreshold: 3 + # clusterAgent.readinessProbe.initialDelaySeconds -- `initialDelaySeconds` for the readiness probe. + initialDelaySeconds: 15 + # clusterAgent.readinessProbe.periodSeconds -- `periodSeconds` for the readiness probe. + periodSeconds: 15 + # clusterAgent.readinessProbe.successThreshold -- `successThreshold` for the readiness probe. + successThreshold: 1 + # clusterAgent.readinessProbe.timeoutSeconds -- `timeoutSeconds` for the readiness probe. + timeoutSeconds: 5 + + # clusterAgent.replicaCount -- Number of replicas of the cluster agent to deploy. + replicaCount: 1 + + serviceaccount: + # clusterAgent.serviceaccount.annotations -- Annotations for the service account for the cluster agent pods + annotations: {} + + # clusterAgent.strategy -- The strategy for the Deployment object. + strategy: + type: RollingUpdate + # rollingUpdate: + # maxUnavailable: 1 + + resources: + limits: + # clusterAgent.resources.limits.cpu -- CPU resource limits. + cpu: "400m" + # clusterAgent.resources.limits.memory -- Memory resource limits. + memory: "800Mi" + requests: + # clusterAgent.resources.requests.cpu -- CPU resource requests. + cpu: "70m" + # clusterAgent.resources.requests.memory -- Memory resource requests. + memory: "512Mi" + + # clusterAgent.nodeSelector -- Node labels for pod assignment. + nodeSelector: {} + + # clusterAgent.tolerations -- Toleration labels for pod assignment. 
+ tolerations: [] + + # clusterAgent.affinity -- Affinity settings for pod assignment. + affinity: {} + +openShiftLogging: + # openShiftLogging.installSecret -- Install a secret for logging on openshift + installSecret: false + +logsAgent: + # logsAgent.enabled -- Enable / disable k8s pod log collection + enabled: true + + # logsAgent.priorityClassName -- Priority class for logsAgent pods. + priorityClassName: "" + + image: + # logsAgent.image.repository -- Base container image repository. + repository: stackstate/promtail + # logsAgent.image.tag -- Default container image tag. + tag: 2.7.1 + # logsAgent.image.pullPolicy -- Default container image pull policy. + pullPolicy: IfNotPresent + + resources: + limits: + # logsAgent.resources.limits.cpu -- CPU resource limits. + cpu: "1300m" + # logsAgent.resources.limits.cpu -- Memory resource limits. + memory: "192Mi" + requests: + # logsAgent.resources.requests.cpu -- CPU resource requests. + cpu: "20m" + # logsAgent.resources.requests.cpu -- Memory resource requests. + memory: "100Mi" + + # logsAgent.updateStrategy -- The update strategy for the DaemonSet object. + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 100 + + # logsAgent.nodeSelector -- Node labels for pod assignment. + nodeSelector: {} + + # logsAgent.tolerations -- Toleration labels for pod assignment. + tolerations: [] + + # logsAgent.affinity -- Affinity settings for pod assignment. + affinity: {} + + serviceaccount: + # logsAgent.serviceaccount.annotations -- Annotations for the service account for the daemonset pods + annotations: {} + +checksAgent: + # checksAgent.enabled -- Enable / disable runnning cluster checks in a separately deployed pod + enabled: true + scc: + # checksAgent.scc.enabled -- Enable / disable the installation of the SecurityContextConfiguration needed for installation on OpenShift + enabled: false + apm: + # checksAgent.apm.enabled -- Enable / disable the agent APM module. 
+ enabled: true + networkTracing: + # checksAgent.networkTracing.enabled -- Enable / disable the agent network tracing module. + enabled: true + processAgent: + # checksAgent.processAgent.enabled -- Enable / disable the agent process agent module. + enabled: true + # checksAgent.skipSslValidation -- Set to true if self signed certificates are used. + skipSslValidation: false + + # nodeAgent.checksTagCardinality -- low, orchestrator or high. Orchestrator level adds pod_name, high adds display_container_name + checksTagCardinality: orchestrator + + # checksAgent.config -- + config: + # checksAgent.config.override -- A list of objects containing three keys `name`, `path` and `data`, specifying filenames at specific paths which need to be (potentially) overridden using a mounted configmap + override: [] + + image: + # checksAgent.image.repository -- Base container image repository. + repository: stackstate/stackstate-k8s-agent + # checksAgent.image.tag -- Default container image tag. + tag: "e36d1c88" + # checksAgent.image.pullPolicy -- Default container image pull policy. + pullPolicy: IfNotPresent + + livenessProbe: + # checksAgent.livenessProbe.enabled -- Enable use of livenessProbe check. + enabled: true + # checksAgent.livenessProbe.failureThreshold -- `failureThreshold` for the liveness probe. + failureThreshold: 3 + # checksAgent.livenessProbe.initialDelaySeconds -- `initialDelaySeconds` for the liveness probe. + initialDelaySeconds: 15 + # checksAgent.livenessProbe.periodSeconds -- `periodSeconds` for the liveness probe. + periodSeconds: 15 + # checksAgent.livenessProbe.successThreshold -- `successThreshold` for the liveness probe. + successThreshold: 1 + # checksAgent.livenessProbe.timeoutSeconds -- `timeoutSeconds` for the liveness probe. + timeoutSeconds: 5 + + # checksAgent.logLevel -- Logging level for clusterchecks agent processes. + logLevel: INFO + + # checksAgent.priorityClassName -- Priority class for clusterchecks agent pods. 
+ priorityClassName: "" + + readinessProbe: + # checksAgent.readinessProbe.enabled -- Enable use of readinessProbe check. + enabled: true + # checksAgent.readinessProbe.failureThreshold -- `failureThreshold` for the readiness probe. + failureThreshold: 3 + # checksAgent.readinessProbe.initialDelaySeconds -- `initialDelaySeconds` for the readiness probe. + initialDelaySeconds: 15 + # checksAgent.readinessProbe.periodSeconds -- `periodSeconds` for the readiness probe. + periodSeconds: 15 + # checksAgent.readinessProbe.successThreshold -- `successThreshold` for the readiness probe. + successThreshold: 1 + # checksAgent.readinessProbe.timeoutSeconds -- `timeoutSeconds` for the readiness probe. + timeoutSeconds: 5 + + # checksAgent.replicas -- Number of clusterchecks agent pods to schedule + replicas: 1 + + resources: + limits: + # checksAgent.resources.limits.cpu -- CPU resource limits. + cpu: "400m" + # checksAgent.resources.limits.cpu -- Memory resource limits. + memory: "600Mi" + requests: + # checksAgent.resources.requests.cpu -- CPU resource requests. + cpu: "20m" + # checksAgent.resources.requests.cpu -- Memory resource requests. + memory: "512Mi" + + serviceaccount: + # checksAgent.serviceaccount.annotations -- Annotations for the service account for the cluster checks pods + annotations: {} + + # checksAgent.strategy -- The strategy for the Deployment object. + strategy: + type: RollingUpdate + # rollingUpdate: + # maxUnavailable: 1 + + # checksAgent.nodeSelector -- Node labels for pod assignment. + nodeSelector: {} + + # checksAgent.tolerations -- Toleration labels for pod assignment. + tolerations: [] + + # checksAgent.affinity -- Affinity settings for pod assignment. 
+ affinity: {} + +################################## +# http-header-injector variables # +################################## + +httpHeaderInjectorWebhook: + # httpHeaderInjectorWebhook.enabled -- Enable the webhook for injection http header injection sidecar proxy + enabled: false + +######################## +# StackState variables # +######################## + +stackstate: + # stackstate.apiKey -- (string) **PROVIDE YOUR API KEY HERE** API key to be used by the StackState agent. + apiKey: + cluster: + # stackstate.cluster.name -- (string) **PROVIDE KUBERNETES CLUSTER NAME HERE** Name of the Kubernetes cluster where the agent will be installed. + name: + # stackstate.cluster.authToken -- Provide a token to enable secure communication between the agent and the cluster agent. + authToken: "" + # stackstate.url -- (string) **PROVIDE STACKSTATE URL HERE** URL of the StackState installation to receive data from the agent. + url: diff --git a/index.yaml b/index.yaml index 7434e0db7..d774e3381 100644 --- a/index.yaml +++ b/index.yaml @@ -5066,6 +5066,39 @@ entries: - assets/argo/argo-cd-5.8.0.tgz version: 5.8.0 artifactory-ha: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: JFrog Artifactory HA + catalog.cattle.io/kube-version: '>= 1.14.0-0' + catalog.cattle.io/release-name: artifactory-ha + apiVersion: v2 + appVersion: 7.68.13 + created: "2023-10-04T15:49:03.34110471Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: file://./charts/postgresql + version: 10.3.18 + description: Universal Repository Manager supporting all major packaging formats, + build tools and CI servers. 
+ digest: 9da7488c428e7ea2ad2e8099b8367812dfc154e7214dcde566a0c68cab250e28 + home: https://www.jfrog.com/artifactory/ + icon: https://raw.githubusercontent.com/jfrog/charts/ea5c3112c24a973f64f3ccd99747323db292a369/stable/artifactory-ha/logo/artifactory-logo.png + keywords: + - artifactory + - jfrog + - devops + kubeVersion: '>= 1.14.0-0' + maintainers: + - email: installers@jfrog.com + name: Chart Maintainers at JFrog + name: artifactory-ha + sources: + - https://github.com/jfrog/charts + type: application + urls: + - assets/jfrog/artifactory-ha-107.68.13.tgz + version: 107.68.13 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: JFrog Artifactory HA @@ -6347,6 +6380,40 @@ entries: - assets/jfrog/artifactory-ha-3.0.1400.tgz version: 3.0.1400 artifactory-jcr: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: JFrog Container Registry + catalog.cattle.io/kube-version: '>= 1.14.0-0' + catalog.cattle.io/release-name: artifactory-jcr + apiVersion: v2 + appVersion: 7.68.13 + created: "2023-10-04T15:49:03.783810401Z" + dependencies: + - name: artifactory + repository: file://./charts/artifactory + version: 107.68.13 + description: JFrog Container Registry + digest: 7bdaa37d12597800d2d9cdd3561784caff06e1ba0a05d9a658ad7725f2bb833c + home: https://jfrog.com/container-registry/ + icon: https://raw.githubusercontent.com/jfrog/charts/ea5c3112c24a973f64f3ccd99747323db292a369/stable/artifactory-jcr/logo/jcr-logo.png + keywords: + - artifactory + - jfrog + - container + - registry + - devops + - jfrog-container-registry + kubeVersion: '>= 1.14.0-0' + maintainers: + - email: helm@jfrog.com + name: Chart Maintainers at JFrog + name: artifactory-jcr + sources: + - https://github.com/jfrog/charts + type: application + urls: + - assets/jfrog/artifactory-jcr-107.68.13.tgz + version: 107.68.13 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: JFrog Container Registry @@ -12647,6 
+12714,27 @@ entries: - assets/cloudcasa/cloudcasa-0.1.000.tgz version: 0.1.000 cockroachdb: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: CockroachDB + catalog.cattle.io/kube-version: '>=1.8-0' + catalog.cattle.io/release-name: cockroachdb + apiVersion: v1 + appVersion: 23.1.11 + created: "2023-10-04T15:49:01.022346207Z" + description: CockroachDB is a scalable, survivable, strongly-consistent SQL database. + digest: 054e62dff4ecd4a0e67c23226febc0e4dbf671b986e0f7ab45fd6aa6ff98c270 + home: https://www.cockroachlabs.com + icon: https://raw.githubusercontent.com/cockroachdb/cockroach/master/docs/media/cockroach_db.png + maintainers: + - email: helm-charts@cockroachlabs.com + name: cockroachlabs + name: cockroachdb + sources: + - https://github.com/cockroachdb/cockroach + urls: + - assets/cockroach-labs/cockroachdb-11.2.1.tgz + version: 11.2.1 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: CockroachDB @@ -13424,6 +13512,32 @@ entries: - assets/mongodb/community-operator-0.7.6.tgz version: 0.7.6 confluent-for-kubernetes: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Confluent For Kubernetes + catalog.cattle.io/kube-version: '>=1.15-0' + catalog.cattle.io/release-name: confluent-for-kubernetes + apiVersion: v1 + appVersion: 2.7.0 + created: "2023-10-04T15:49:01.140876383Z" + description: A Helm chart to deploy Confluent for Kubernetes + digest: a06930ec1bbfdfce196741f9a349d423b8623517ad32c2543d63319db0086cc9 + home: https://www.confluent.io/ + icon: https://cdn.confluent.io/wp-content/uploads/seo-logo-meadow.png + keywords: + - Confluent + - Confluent Operator + - Confluent Platform + - CFK + maintainers: + - email: operator@confluent.io + name: Confluent Operator + name: confluent-for-kubernetes + sources: + - https://docs.confluent.io/current/index.html + urls: + - assets/confluent/confluent-for-kubernetes-0.824.14.tgz + version: 0.824.14 - 
annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Confluent For Kubernetes @@ -16322,6 +16436,43 @@ entries: - assets/weka/csi-wekafsplugin-0.6.400.tgz version: 0.6.400 datadog: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Datadog + catalog.cattle.io/kube-version: '>=1.10-0' + catalog.cattle.io/release-name: datadog + apiVersion: v1 + appVersion: "7" + created: "2023-10-04T15:49:01.749741223Z" + dependencies: + - condition: clusterAgent.metricsProvider.useDatadogMetrics + name: datadog-crds + repository: https://helm.datadoghq.com + tags: + - install-crds + version: 1.0.1 + - condition: datadog.kubeStateMetricsEnabled + name: kube-state-metrics + repository: https://prometheus-community.github.io/helm-charts + version: 2.13.2 + description: Datadog Agent + digest: ec4d9baeba62d44684df7cdae4407a0099e9881d9e8436c238767b54121a580d + home: https://www.datadoghq.com + icon: https://datadog-live.imgix.net/img/dd_logo_70x75.png + keywords: + - monitoring + - alerting + - metric + maintainers: + - email: support@datadoghq.com + name: Datadog + name: datadog + sources: + - https://app.datadoghq.com/account/settings#agent/kubernetes + - https://github.com/DataDog/datadog-agent + urls: + - assets/datadog/datadog-3.38.4.tgz + version: 3.38.4 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Datadog @@ -18748,6 +18899,39 @@ entries: - assets/datadog/datadog-2.4.200.tgz version: 2.4.200 datadog-operator: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Datadog Operator + catalog.cattle.io/release-name: datadog-operator + apiVersion: v2 + appVersion: 1.1.0 + created: "2023-10-04T15:49:01.895002864Z" + dependencies: + - alias: datadogCRDs + condition: installCRDs + name: datadog-crds + repository: file://./charts/datadog-crds + tags: + - install-crds + version: =1.1.0 + description: Datadog Operator + digest: 
2317f06e87036a89b0e04c0301892bebab2b5bbb3dcf9030e2dd0d5f936d8dc1 + home: https://www.datadoghq.com + icon: https://datadog-live.imgix.net/img/dd_logo_70x75.png + keywords: + - monitoring + - alerting + - metric + maintainers: + - email: support@datadoghq.com + name: Datadog + name: datadog-operator + sources: + - https://app.datadoghq.com/account/settings#agent/kubernetes + - https://github.com/DataDog/datadog-agent + urls: + - assets/datadog/datadog-operator-1.1.2.tgz + version: 1.1.2 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Datadog Operator @@ -29216,6 +29400,58 @@ entries: - assets/kasten/k10-4.5.900.tgz version: 4.5.900 kafka: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Apache Kafka + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: kafka + category: Infrastructure + images: | + - name: jmx-exporter + image: docker.io/bitnami/jmx-exporter:0.19.0-debian-11-r84 + - name: kafka-exporter + image: docker.io/bitnami/kafka-exporter:1.7.0-debian-11-r120 + - name: kafka + image: docker.io/bitnami/kafka:3.5.1-debian-11-r61 + - name: kubectl + image: docker.io/bitnami/kubectl:1.28.2-debian-11-r2 + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r77 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 3.5.1 + created: "2023-10-04T15:48:58.667461147Z" + dependencies: + - condition: zookeeper.enabled + name: zookeeper + repository: file://./charts/zookeeper + version: 12.x.x + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: Apache Kafka is a distributed streaming platform designed to build + real-time pipelines and can be used as a message broker or as a replacement + for a log aggregation solution for big data applications. 
+ digest: 523d31ee7e05e5912e48e47bc356c7be9ec6aaa36988afe8823dc78356e83919 + home: https://bitnami.com + icon: https://svn.apache.org/repos/asf/comdev/project-logos/originals/kafka.svg + keywords: + - kafka + - zookeeper + - streaming + - producer + - consumer + maintainers: + - name: VMware, Inc. + url: https://github.com/bitnami/charts + name: kafka + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/kafka + urls: + - assets/bitnami/kafka-25.3.0.tgz + version: 25.3.0 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Apache Kafka @@ -32037,6 +32273,33 @@ entries: - assets/elastic/kibana-7.17.3.tgz version: 7.17.3 kong: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Kong Gateway + catalog.cattle.io/release-name: kong + apiVersion: v2 + appVersion: "3.4" + created: "2023-10-04T15:49:04.694923539Z" + dependencies: + - condition: postgresql.enabled + name: postgresql + repository: file://./charts/postgresql + version: 11.9.13 + description: The Cloud-Native Ingress and API-management + digest: 83dec46bb9870001e291e1197f6314657b0d0ba9354faa0161d6c07794622568 + home: https://konghq.com/ + icon: https://s3.amazonaws.com/downloads.kong/universe/assets/icon-kong-inc-large.png + maintainers: + - email: harry@konghq.com + name: hbagdi + - email: traines@konghq.com + name: rainest + name: kong + sources: + - https://github.com/Kong/charts/tree/main/charts/kong + urls: + - assets/kong/kong-2.28.1.tgz + version: 2.28.1 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Kong Gateway @@ -33061,6 +33324,36 @@ entries: urls: - assets/kubemq/kubemq-crds-2.3.7.tgz version: 2.3.7 + kubernetes-ingress-controller: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: ngrok Ingress Controller + catalog.cattle.io/release-name: kubernetes-ingress-controller + apiVersion: v2 + appVersion: 0.9.0 + created: 
"2023-10-04T15:49:05.843982738Z" + dependencies: + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: A Kubernetes ingress controller built using ngrok. + digest: abf93290f193ec8cc7b0ae14adadf09a86bdbc1b26275824aa6f8f068a023e83 + home: https://ngrok.com + icon: https://assets-global.website-files.com/63ed4bc7a4b189da942a6b8c/6411ffa0b395a44345ed2b1a_Frame%201.svg + keywords: + - ngrok + - networking + - ingress + - edge + - api gateway + name: kubernetes-ingress-controller + sources: + - https://github.com/ngrok/kubernetes-ingress-controller + urls: + - assets/ngrok/kubernetes-ingress-controller-0.11.0.tgz + version: 0.11.0 kubeslice-controller: - annotations: catalog.cattle.io/certified: partner @@ -45996,6 +46289,50 @@ entries: - assets/quobyte/quobyte-cluster-0.1.5.tgz version: 0.1.5 redis: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Redis + catalog.cattle.io/kube-version: '>=1.19-0' + catalog.cattle.io/release-name: redis + category: Database + images: | + - name: os-shell + image: docker.io/bitnami/os-shell:11-debian-11-r60 + - name: redis-exporter + image: docker.io/bitnami/redis-exporter:1.54.0-debian-11-r0 + - name: redis-sentinel + image: docker.io/bitnami/redis-sentinel:7.2.1-debian-11-r0 + - name: redis + image: docker.io/bitnami/redis:7.2.1-debian-11-r0 + licenses: Apache-2.0 + apiVersion: v2 + appVersion: 7.2.1 + created: "2023-10-04T15:48:59.56901939Z" + dependencies: + - name: common + repository: file://./charts/common + tags: + - bitnami-common + version: 2.x.x + description: Redis(R) is an open source, advanced key-value store. It is often + referred to as a data structure server since keys can contain strings, hashes, + lists, sets and sorted sets. 
+ digest: 67f9730f6fc97bb52542a8cdfcc1c384e2d86e3b811b42b23416aa1de55bf1ce + home: https://bitnami.com + icon: https://redis.com/wp-content/uploads/2021/08/redis-logo.png + keywords: + - redis + - keyvalue + - database + maintainers: + - name: VMware, Inc. + url: https://github.com/bitnami/charts + name: redis + sources: + - https://github.com/bitnami/charts/tree/main/bitnami/redis + urls: + - assets/bitnami/redis-18.1.2.tgz + version: 18.1.2 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Redis @@ -47920,6 +48257,50 @@ entries: - assets/bitnami/redis-17.3.7.tgz version: 17.3.7 redpanda: + - annotations: + artifacthub.io/images: | + - name: redpanda + image: docker.redpanda.com/redpandadata/redpanda:v23.2.9 + - name: busybox + image: busybox:latest + - name: mintel/docker-alpine-bash-curl-jq + image: mintel/docker-alpine-bash-curl-jq:latest + artifacthub.io/license: Apache-2.0 + artifacthub.io/links: | + - name: Documentation + url: https://docs.redpanda.com + - name: "Helm (>= 3.6.0)" + url: https://helm.sh/docs/intro/install/ + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Redpanda + catalog.cattle.io/kube-version: '>=1.21-0' + catalog.cattle.io/release-name: redpanda + apiVersion: v2 + appVersion: v23.2.9 + created: "2023-10-04T15:49:06.707026594Z" + dependencies: + - condition: console.enabled + name: console + repository: file://./charts/console + version: '>=0.5 <1.0' + - condition: connectors.enabled + name: connectors + repository: file://./charts/connectors + version: '>=0.1.2 <1.0' + description: Redpanda is the real-time engine for modern apps. 
+ digest: f0c19321330170bedf33eddff26a5897ee785524b14d98457bec623a5ee77b8b + icon: https://images.ctfassets.net/paqvtpyf8rwu/3cYHw5UzhXCbKuR24GDFGO/73fb682e6157d11c10d5b2b5da1d5af0/skate-stand-panda.svg + kubeVersion: '>=1.21-0' + maintainers: + - name: redpanda-data + url: https://github.com/orgs/redpanda-data/people + name: redpanda + sources: + - https://github.com/redpanda-data/helm-charts + type: application + urls: + - assets/redpanda/redpanda-5.6.0.tgz + version: 5.6.0 - annotations: artifacthub.io/images: | - name: redpanda @@ -54904,6 +55285,35 @@ entries: urls: - assets/speedscale/speedscale-operator-0.9.12600.tgz version: 0.9.12600 + stackstate-k8s-agent: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: StackState Agent + catalog.cattle.io/kube-version: '>=1.19.0-0' + catalog.cattle.io/release-name: stackstate-k8s-agent + apiVersion: v2 + appVersion: 2.19.1 + created: "2023-10-04T15:49:06.818791653Z" + dependencies: + - alias: httpHeaderInjectorWebhook + name: http-header-injector + repository: file://./charts/http-header-injector + version: 0.0.6 + description: Helm chart for the StackState Agent. 
+ digest: d57575d97f17c18321e03576fb1ce13544eca5ea415eaa3963764fff525d2b4e + home: https://github.com/StackVista/stackstate-agent + icon: https://raw.githubusercontent.com/StackVista/helm-charts/master/stable/stackstate-k8s-agent/logo.svg + keywords: + - monitoring + - observability + - stackstate + maintainers: + - email: ops@stackstate.com + name: Stackstate + name: stackstate-k8s-agent + urls: + - assets/stackstate/stackstate-k8s-agent-1.0.49.tgz + version: 1.0.49 sumologic: - annotations: catalog.cattle.io/certified: partner @@ -59498,6 +59908,33 @@ entries: - assets/universal-crossplane/universal-crossplane-1.2.200100.tgz version: 1.2.200100 vals-operator: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: Vals-Operator + catalog.cattle.io/kube-version: '>= 1.19.0-0' + catalog.cattle.io/release-name: vals-operator + apiVersion: v2 + appVersion: v0.7.7 + created: "2023-10-04T15:49:01.931333925Z" + description: 'This helm chart installs the Digitalis Vals Operator to manage and + sync secrets from supported backends into Kubernetes. ## About Vals-Operator + Here at [Digitalis](https://digitalis.io) we love [vals](https://github.com/helmfile/vals), + it''s a tool we use daily to keep secrets stored securely. Inspired by this + tool, we have created an operator to manage Kubernetes secrets. *vals-operator* + syncs secrets from any secrets store supported by [vals](https://github.com/helmfile/vals) + into Kubernetes. Also, `vals-operator` supports database secrets as provider + by [HashiCorp Vault Secret Engine](https://developer.hashicorp.com/vault/docs/secrets/databases). 
' + digest: cef6777ac9fd519ee246543e7672deeb349561447992d8363b806c76d083b005 + icon: https://digitalis.io/wp-content/uploads/2020/06/cropped-Digitalis-512x512-Blue_Digitalis-512x512-Blue-32x32.png + kubeVersion: '>= 1.19.0-0' + maintainers: + - email: info@digitalis.io + name: Digitalis.IO + name: vals-operator + type: application + urls: + - assets/digitalis/vals-operator-0.7.7.tgz + version: 0.7.7 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: Vals-Operator