diff --git a/packages/rancher-logging/package.yaml b/packages/rancher-logging/package.yaml
index 7c417f8f0..65caffcda 100644
--- a/packages/rancher-logging/package.yaml
+++ b/packages/rancher-logging/package.yaml
@@ -1,5 +1,5 @@
 url: https://kubernetes-charts.banzaicloud.com/charts/logging-operator-3.10.0.tgz
-packageVersion: 1
+packageVersion: 0
 additionalCharts:
 - workingDir: charts-crd
   crdOptions:
diff --git a/packages/rancher-logging/templates/crd-template/Chart.yaml b/packages/rancher-logging/templates/crd-template/Chart.yaml
index f40341a4c..a9aeb4be3 100644
--- a/packages/rancher-logging/templates/crd-template/Chart.yaml
+++ b/packages/rancher-logging/templates/crd-template/Chart.yaml
@@ -1,5 +1,5 @@
 apiVersion: v1
-version: 3.9.4
+version: 3.10.0
 description: Installs the CRDs for rancher-logging.
 name: rancher-logging-crd
 type: application
diff --git a/packages/rancher-monitoring/generated-changes/dependencies/rke2IngressNginx/dependency.yaml b/packages/rancher-monitoring/generated-changes/dependencies/rke2IngressNginx/dependency.yaml
new file mode 100644
index 000000000..136939aec
--- /dev/null
+++ b/packages/rancher-monitoring/generated-changes/dependencies/rke2IngressNginx/dependency.yaml
@@ -0,0 +1,2 @@
+workingDir: ""
+url: packages/rancher-pushprox
diff --git a/packages/rancher-monitoring/generated-changes/dependencies/rkeIngressNginx/dependency.yaml b/packages/rancher-monitoring/generated-changes/dependencies/rkeIngressNginx/dependency.yaml
new file mode 100644
index 000000000..136939aec
--- /dev/null
+++ b/packages/rancher-monitoring/generated-changes/dependencies/rkeIngressNginx/dependency.yaml
@@ -0,0 +1,2 @@
+workingDir: ""
+url: packages/rancher-pushprox
diff --git a/packages/rancher-monitoring/generated-changes/overlay/templates/rancher-monitoring/dashboards/addons/ingress-nginx-dashboard.yaml b/packages/rancher-monitoring/generated-changes/overlay/templates/rancher-monitoring/dashboards/addons/ingress-nginx-dashboard.yaml
index 20c57dd2a..7b51a0bf7 100644
--- a/packages/rancher-monitoring/generated-changes/overlay/templates/rancher-monitoring/dashboards/addons/ingress-nginx-dashboard.yaml
+++ b/packages/rancher-monitoring/generated-changes/overlay/templates/rancher-monitoring/dashboards/addons/ingress-nginx-dashboard.yaml
@@ -1,12 +1,12 @@
-# Source:
 {{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.ingressNginx.enabled }}
 apiVersion: v1
 kind: ConfigMap
 metadata:
   namespace: {{ .Values.grafana.defaultDashboards.namespace }}
   name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "ingress-nginx" | trunc 63 | trimSuffix "-" }}
-  annotations:
-{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }}
+  {{- if .Values.grafana.sidecar.dashboards.annotations }}
+  annotations: {{ toYaml .Values.grafana.sidecar.dashboards.annotations | nindent 4 }}
+  {{- end }}
   labels:
     {{- if $.Values.grafana.sidecar.dashboards.label }}
     {{ $.Values.grafana.sidecar.dashboards.label }}: "1"
diff --git a/packages/rancher-monitoring/generated-changes/overlay/templates/rancher-monitoring/exporters/ingress-nginx/service.yaml b/packages/rancher-monitoring/generated-changes/overlay/templates/rancher-monitoring/exporters/ingress-nginx/service.yaml
index d256576ad..53a9ad689 100644
--- a/packages/rancher-monitoring/generated-changes/overlay/templates/rancher-monitoring/exporters/ingress-nginx/service.yaml
+++ b/packages/rancher-monitoring/generated-changes/overlay/templates/rancher-monitoring/exporters/ingress-nginx/service.yaml
@@ -1,4 +1,7 @@
-{{- if .Values.ingressNginx.enabled }}
+{{- if and (not .Values.ingressNginx.enabled) (.Values.rkeIngressNginx.enabled) }}
+{{- fail "Cannot set .Values.rkeIngressNginx.enabled=true when .Values.ingressNginx.enabled=false" }}
+{{- end }}
+{{- if and .Values.ingressNginx.enabled (not .Values.rkeIngressNginx.enabled) }}
 apiVersion: v1
 kind: Service
 metadata:
diff --git a/packages/rancher-monitoring/generated-changes/overlay/templates/rancher-monitoring/exporters/ingress-nginx/servicemonitor.yaml b/packages/rancher-monitoring/generated-changes/overlay/templates/rancher-monitoring/exporters/ingress-nginx/servicemonitor.yaml
index 643778772..a42f46e5c 100644
--- a/packages/rancher-monitoring/generated-changes/overlay/templates/rancher-monitoring/exporters/ingress-nginx/servicemonitor.yaml
+++ b/packages/rancher-monitoring/generated-changes/overlay/templates/rancher-monitoring/exporters/ingress-nginx/servicemonitor.yaml
@@ -1,4 +1,7 @@
-{{- if .Values.ingressNginx.enabled }}
+{{- if and (not .Values.ingressNginx.enabled) (.Values.rkeIngressNginx.enabled) }}
+{{- fail "Cannot set .Values.rkeIngressNginx.enabled=true when .Values.ingressNginx.enabled=false" }}
+{{- end }}
+{{- if and .Values.ingressNginx.enabled (not .Values.rkeIngressNginx.enabled) }}
 apiVersion: monitoring.coreos.com/v1
 kind: ServiceMonitor
 metadata:
diff --git a/packages/rancher-monitoring/generated-changes/patch/Chart.yaml.patch b/packages/rancher-monitoring/generated-changes/patch/Chart.yaml.patch
index afb0c6eee..7611b6c7d 100644
--- a/packages/rancher-monitoring/generated-changes/patch/Chart.yaml.patch
+++ b/packages/rancher-monitoring/generated-changes/patch/Chart.yaml.patch
@@ -16,7 +16,7 @@
 apiVersion: v2
 appVersion: 0.46.0
 dependencies:
-@@ -65,19 +74,17 @@
+@@ -71,19 +80,17 @@
 - condition: rkeScheduler.enabled
   name: rkeScheduler
   repository: file://./charts/rkeScheduler
@@ -39,7 +39,7 @@
 kubeVersion: '>=1.16.0-0'
 maintainers:
 - name: vsliouniaev
-@@ -90,9 +97,12 @@
+@@ -96,9 +103,12 @@
   name: scottrigby
 - email: miroslav.hadzhiev@gmail.com
   name: Xtigyro
diff --git a/packages/rancher-monitoring/generated-changes/patch/README.md.patch b/packages/rancher-monitoring/generated-changes/patch/README.md.patch
index a00130adc..67d72c1e8 100644
--- a/packages/rancher-monitoring/generated-changes/patch/README.md.patch
+++ b/packages/rancher-monitoring/generated-changes/patch/README.md.patch
@@ -1,6 +1,6 @@
 --- charts-original/README.md
 +++ charts/README.md
-@@ -171,7 +171,41 @@
+@@ -171,7 +171,43 @@
 helm show values prometheus-community/kube-prometheus-stack
 ```

@@ -33,7 +33,9 @@
 +| `rkeControllerManager.enabled` | Create a PushProx installation for monitoring kube-controller-manager metrics in RKE clusters | `false` |
 +| `rkeScheduler.enabled` | Create a PushProx installation for monitoring kube-scheduler metrics in RKE clusters | `false` |
 +| `rkeProxy.enabled` | Create a PushProx installation for monitoring kube-proxy metrics in RKE clusters | `false` |
++| `rkeIngressNginx.enabled` | Create a PushProx installation for monitoring ingress-nginx metrics in RKE clusters | `false` |
 +| `rkeEtcd.enabled` | Create a PushProx installation for monitoring etcd metrics in RKE clusters | `false` |
++| `rke2IngressNginx.enabled` | Create a PushProx installation for monitoring ingress-nginx metrics in RKE2 clusters | `false` |
 +| `k3sServer.enabled` | Create a PushProx installation for monitoring k3s-server metrics (accounts for kube-controller-manager, kube-scheduler, and kube-proxy metrics) in k3s clusters | `false` |
 +| `kubeAdmControllerManager.enabled` | Create a PushProx installation for monitoring kube-controller-manager metrics in kubeAdm clusters | `false` |
 +| `kubeAdmScheduler.enabled` | Create a PushProx installation for monitoring kube-scheduler metrics in kubeAdm clusters | `false` |
diff --git a/packages/rancher-monitoring/generated-changes/patch/values.yaml.patch b/packages/rancher-monitoring/generated-changes/patch/values.yaml.patch
index 1c4882f9f..75d4ac9fe 100644
--- a/packages/rancher-monitoring/generated-changes/patch/values.yaml.patch
+++ b/packages/rancher-monitoring/generated-changes/patch/values.yaml.patch
@@ -1,6 +1,6 @@
 --- charts-original/values.yaml
 +++ charts/values.yaml
-@@ -2,13 +2,375 @@
+@@ -2,13 +2,423 @@
 # This is a YAML-formatted file.
 # Declare variables to be passed into your templates.

@@ -84,6 +84,21 @@
 +      - effect: "NoSchedule"
 +        operator: "Exists"
 +
++rkeIngressNginx:
++  enabled: false
++  metricsPort: 10254
++  component: ingress-nginx
++  clients:
++    port: 10015
++    useLocalhost: true
++    tolerations:
++      - effect: "NoExecute"
++        operator: "Exists"
++      - effect: "NoSchedule"
++        operator: "Exists"
++    nodeSelector:
++      node-role.kubernetes.io/worker: "true"
++
 +## k3s PushProx Monitoring
 +## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox
 +##
@@ -260,6 +275,39 @@
 +      - effect: "NoSchedule"
 +        operator: "Exists"
 +
++rke2IngressNginx:
++  enabled: false
++  metricsPort: 10254
++  component: ingress-nginx
++  clients:
++    port: 10015
++    useLocalhost: true
++    tolerations:
++      - effect: "NoExecute"
++        operator: "Exists"
++      - effect: "NoSchedule"
++        operator: "Exists"
++    affinity:
++      podAffinity:
++        requiredDuringSchedulingIgnoredDuringExecution:
++          - labelSelector:
++              matchExpressions:
++                - key: "app.kubernetes.io/component"
++                  operator: "In"
++                  values:
++                    - "controller"
++            topologyKey: "kubernetes.io/hostname"
++            namespaces:
++              - "kube-system"
++    # In RKE2 clusters, the ingress-nginx-controller is deployed as
++    # a Deployment with 1 pod when the RKE2 version is <= 1.20, and as
++    # a DaemonSet when the RKE2 version is >= 1.21.
++    deployment:
++      enabled: false
++      replicas: 1
++
++
++
 +## Additional PushProx Monitoring
 +## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox
 +##
@@ -378,7 +426,7 @@
 ## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.16.6
 ##
-@@ -89,8 +451,32 @@
+@@ -89,8 +499,32 @@
 ##
 global:
@@ -411,7 +459,7 @@
   pspEnabled: true
   pspAnnotations: {}
 ## Specify pod annotations
-@@ -143,6 +529,22 @@
+@@ -143,6 +577,22 @@
 ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
 ## https://prometheus.io/webtools/alerting/routing-tree-editor/
 ##
@@ -434,7 +482,7 @@
 config:
   global:
     resolve_timeout: 5m
-@@ -179,25 +581,76 @@
+@@ -179,25 +629,76 @@
 ## ref: https://prometheus.io/docs/alerting/notifications/
 ## https://prometheus.io/docs/alerting/notification_examples/
 ##
@@ -530,7 +578,7 @@
 ingress:
   enabled: false
-@@ -235,6 +688,25 @@
+@@ -235,6 +736,25 @@
 ## Configuration for Alertmanager secret
 ##
 secret:
@@ -556,7 +604,7 @@
 annotations: {}
 ## Configuration for creating an Ingress that will map to each Alertmanager replica service
-@@ -352,7 +824,7 @@
+@@ -352,7 +872,7 @@
 ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
 tlsConfig: {}
@@ -565,7 +613,7 @@
 ## metric relabel configs to apply to samples before ingestion.
 ##
-@@ -383,7 +855,7 @@
+@@ -383,7 +903,7 @@
 ## Image of Alertmanager
 ##
 image:
@@ -574,7 +622,7 @@
   tag: v0.21.0
   sha: ""
-@@ -495,9 +967,13 @@
+@@ -495,9 +1015,13 @@
 ## Define resources requests and limits for single Pods.
 ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
 ##
@@ -591,7 +639,7 @@
 ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
 ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
-@@ -601,10 +1077,46 @@
+@@ -601,10 +1125,46 @@
   enabled: true
   namespaceOverride: ""
@@ -638,7 +686,7 @@
   adminPassword: prom-operator
   ingress:
-@@ -644,6 +1156,7 @@
+@@ -644,6 +1204,7 @@
   dashboards:
     enabled: true
     label: grafana_dashboard
@@ -646,7 +694,7 @@
 ## Annotations for Grafana dashboard configmaps
 ##
-@@ -692,7 +1205,60 @@
+@@ -692,7 +1253,60 @@
 ## Passed to grafana subchart and used by servicemonitor below
 ##
 service:
@@ -708,7 +756,7 @@
 ## If true, create a serviceMonitor for grafana
 ##
-@@ -722,6 +1288,14 @@
+@@ -722,6 +1336,14 @@
 #   targetLabel: nodename
 #   replacement: $1
 #   action: replace
@@ -723,7 +771,7 @@
 ## Component scraping the kube api server
 ##
-@@ -879,7 +1453,7 @@
+@@ -879,7 +1501,7 @@
 ## Component scraping the kube controller manager
 ##
 kubeControllerManager:
@@ -732,7 +780,7 @@
 ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on
 ##
-@@ -1014,7 +1588,7 @@
+@@ -1014,7 +1636,7 @@
 ## Component scraping etcd
 ##
 kubeEtcd:
@@ -741,7 +789,7 @@
 ## If your etcd is not deployed as a pod, specify IPs it can be found on
 ##
-@@ -1076,7 +1650,7 @@
+@@ -1076,7 +1698,7 @@
 ## Component scraping kube scheduler
 ##
 kubeScheduler:
@@ -750,7 +798,7 @@
 ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on
 ##
-@@ -1131,7 +1705,7 @@
+@@ -1131,7 +1753,7 @@
 ## Component scraping kube proxy
 ##
 kubeProxy:
@@ -759,7 +807,7 @@
 ## If your kube proxy is not deployed as a pod, specify IPs it can be found on
 ##
-@@ -1210,6 +1784,13 @@
+@@ -1210,6 +1832,13 @@
   create: true
 podSecurityPolicy:
   enabled: true
@@ -773,7 +821,7 @@
 ## Deploy node exporter as a daemonset to all nodes
 ##
-@@ -1259,6 +1840,16 @@
+@@ -1259,6 +1888,16 @@
 extraArgs:
 - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
 - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$
@@ -790,7 +838,7 @@
 ## Manages Prometheus and Alertmanager components
 ##
-@@ -1271,8 +1862,8 @@
+@@ -1271,8 +1910,8 @@
   enabled: true
   # Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants
   tlsMinVersion: VersionTLS13
@@ -801,7 +849,7 @@
 ## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted
 ## rules from making their way into prometheus and potentially preventing the container from starting
-@@ -1289,7 +1880,7 @@
+@@ -1289,7 +1928,7 @@
   patch:
     enabled: true
     image:
@@ -810,7 +858,7 @@
       tag: v1.5.0
       sha: ""
       pullPolicy: IfNotPresent
-@@ -1428,13 +2019,13 @@
+@@ -1428,13 +2067,13 @@
 ## Resource limits & requests
 ##
@@ -831,7 +879,7 @@
 # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
 # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
admission webhooks are not working -@@ -1487,7 +2078,7 @@ +@@ -1487,7 +2126,7 @@ ## Prometheus-operator image ## image: @@ -840,7 +888,7 @@ tag: v0.46.0 sha: "" pullPolicy: IfNotPresent -@@ -1503,7 +2094,7 @@ +@@ -1503,7 +2142,7 @@ ## Prometheus-config-reloader image to use for config and rule reloading ## prometheusConfigReloaderImage: @@ -849,7 +897,7 @@ tag: v0.46.0 sha: "" -@@ -1558,6 +2149,14 @@ +@@ -1558,6 +2197,14 @@ ## nodePort: 30901 @@ -864,7 +912,7 @@ ## Configuration for Prometheus service ## service: -@@ -1570,7 +2169,7 @@ +@@ -1570,7 +2217,7 @@ port: 9090 ## To be used with a proxy extraContainer port @@ -873,7 +921,7 @@ ## List of IP addresses at which the Prometheus server service is available ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips -@@ -1822,7 +2421,7 @@ +@@ -1822,7 +2469,7 @@ ## Image of Prometheus. ## image: @@ -882,7 +930,7 @@ tag: v2.24.0 sha: "" -@@ -1885,6 +2484,11 @@ +@@ -1885,6 +2532,11 @@ ## externalUrl: "" @@ -894,7 +942,7 @@ ## Define which Nodes the Pods are scheduled on. ## ref: https://kubernetes.io/docs/user-guide/node-selection/ ## -@@ -1917,7 +2521,7 @@ +@@ -1917,7 +2569,7 @@ ## prometheus resource to be created with selectors based on values in the helm deployment, ## which will also match the PrometheusRule resources created ## @@ -903,7 +951,7 @@ ## PrometheusRules to be selected for target discovery. ## If {}, select all PrometheusRules -@@ -1942,7 +2546,7 @@ +@@ -1942,7 +2594,7 @@ ## prometheus resource to be created with selectors based on values in the helm deployment, ## which will also match the servicemonitors created ## @@ -912,7 +960,7 @@ ## ServiceMonitors to be selected for target discovery. ## If {}, select all ServiceMonitors -@@ -1965,7 +2569,7 @@ +@@ -1965,7 +2617,7 @@ ## prometheus resource to be created with selectors based on values in the helm deployment, ## which will also match the podmonitors created ## @@ -921,7 +969,7 @@ ## PodMonitors to be selected for target discovery. ## If {}, select all PodMonitors -@@ -2092,9 +2696,13 @@ +@@ -2092,9 +2744,13 @@ ## Resource limits & requests ## @@ -938,7 +986,7 @@ ## Prometheus StorageSpec for persistent data ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md -@@ -2117,7 +2725,13 @@ +@@ -2117,7 +2773,13 @@ # medium: Memory # Additional volumes on the output StatefulSet definition. @@ -953,7 +1001,7 @@ # Additional VolumeMounts on the output StatefulSet definition. volumeMounts: [] -@@ -2224,9 +2838,34 @@ +@@ -2224,9 +2886,34 @@ ## thanos: {} @@ -989,7 +1037,7 @@ ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes ## (permissions, dir tree) on mounted volumes before starting prometheus -@@ -2234,7 +2873,7 @@ +@@ -2234,7 +2921,7 @@ ## PortName to use for Prometheus. ## diff --git a/packages/rancher-pushprox/charts/README.md b/packages/rancher-pushprox/charts/README.md index ad14761d0..0530c56aa 100755 --- a/packages/rancher-pushprox/charts/README.md +++ b/packages/rancher-pushprox/charts/README.md @@ -24,6 +24,7 @@ The following tables list the configurable parameters of the rancher-pushprox ch | ----- | ----------- | ------ | | `component` | The component that is being monitored | `kube-etcd` | `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. 
 | `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://<HOST_IP>:<metricsPort>/metrics`) | `2379` |
+| `namespaceOverride` | The namespace to install the chart | `""` |

 #### Optional
 | Parameter | Description | Default |
@@ -42,6 +43,9 @@ The following tables list the configurable parameters of the rancher-pushprox ch
 | `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` |
 | `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` |
 | `clients.rbac.additionalRules` | Additional permissions to provide to the ServiceAccount bound to the client. This can be used to provide additional permissions for the client to scrape metrics from the k8s API. Only enabled if clients.https.enabled and clients.https.useServiceAccountCredentials are true | `[]` |
+| `clients.deployment.enabled` | Deploys the client as a Deployment (generally used if the underlying hostNetwork Pod that is being scraped is managed by a Deployment) | `false` |
+| `clients.deployment.replicas` | The number of pods the Deployment has; it should match the number of pods the hostNetwork Deployment has. Required and only used if `clients.deployment.enabled` is set | `0` |
+| `clients.deployment.affinity` | The affinity rules used to schedule the client pods onto the nodes where the hostNetwork Deployment's pods run. Required and only used if `clients.deployment.enabled` is set | `{}` |
 | `clients.resources` | Set resource limits and requests for the client container | `{}` |
 | `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` |
 | `clients.tolerations` | Specify tolerations for clients | `[]` |
diff --git a/packages/rancher-pushprox/charts/templates/_helpers.tpl b/packages/rancher-pushprox/charts/templates/_helpers.tpl
index b404126e3..458ad21cd 100644
--- a/packages/rancher-pushprox/charts/templates/_helpers.tpl
+++ b/packages/rancher-pushprox/charts/templates/_helpers.tpl
@@ -49,7 +49,7 @@ provider: kubernetes
 {{- if .Values.clients.proxyUrl -}}
 {{ printf "%s" .Values.clients.proxyUrl }}
 {{- else -}}
-{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) .Release.Namespace (int .Values.proxy.port) }}
+{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) (include "pushprox.namespace" .) (int .Values.proxy.port) }}
 {{- end -}}{{- end -}}

 # Client
diff --git a/packages/rancher-pushprox/charts/templates/pushprox-clients-rbac.yaml b/packages/rancher-pushprox/charts/templates/pushprox-clients-rbac.yaml
index 0f5a377ee..f1a8e7232 100644
--- a/packages/rancher-pushprox/charts/templates/pushprox-clients-rbac.yaml
+++ b/packages/rancher-pushprox/charts/templates/pushprox-clients-rbac.yaml
@@ -30,20 +30,20 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: {{ template "pushProxy.client.name" . }}
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ include "pushprox.namespace" . }}
 ---
 apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: {{ template "pushProxy.client.name" . }}
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ include "pushprox.namespace" . }}
   labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
 ---
 apiVersion: policy/v1beta1
 kind: PodSecurityPolicy
 metadata:
   name: {{ template "pushProxy.client.name" . }}
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ include "pushprox.namespace" . }}
   labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
 spec:
   privileged: false
diff --git a/packages/rancher-pushprox/charts/templates/pushprox-clients.yaml b/packages/rancher-pushprox/charts/templates/pushprox-clients.yaml
index ed78792e5..3775d17b8 100644
--- a/packages/rancher-pushprox/charts/templates/pushprox-clients.yaml
+++ b/packages/rancher-pushprox/charts/templates/pushprox-clients.yaml
@@ -1,18 +1,28 @@
 {{- if .Values.clients }}{{- if .Values.clients.enabled }}
 apiVersion: apps/v1
+{{- if .Values.clients.deployment.enabled }}
+kind: Deployment
+{{- else }}
 kind: DaemonSet
+{{- end }}
 metadata:
   name: {{ template "pushProxy.client.name" . }}
   namespace: {{ template "pushprox.namespace" . }}
   labels: {{ include "pushProxy.client.labels" . | nindent 4 }}
     pushprox-exporter: "client"
 spec:
+  {{- if .Values.clients.deployment.enabled }}
+  replicas: {{ .Values.clients.deployment.replicas }}
+  {{- end }}
   selector:
     matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }}
   template:
     metadata:
       labels: {{ include "pushProxy.client.labels" . | nindent 8 }}
     spec:
+      {{- if .Values.clients.affinity }}
+      affinity: {{ toYaml .Values.clients.affinity | nindent 8 }}
+      {{- end }}
       nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
       {{- if .Values.clients.nodeSelector }}
 {{ toYaml .Values.clients.nodeSelector | indent 8 }}
diff --git a/packages/rancher-pushprox/charts/templates/pushprox-proxy-rbac.yaml b/packages/rancher-pushprox/charts/templates/pushprox-proxy-rbac.yaml
index a3509c160..147eb4374 100644
--- a/packages/rancher-pushprox/charts/templates/pushprox-proxy-rbac.yaml
+++ b/packages/rancher-pushprox/charts/templates/pushprox-proxy-rbac.yaml
@@ -23,20 +23,20 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: {{ template "pushProxy.proxy.name" . }}
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ include "pushprox.namespace" . }}
 ---
 apiVersion: v1
 kind: ServiceAccount
 metadata:
   name: {{ template "pushProxy.proxy.name" . }}
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ include "pushprox.namespace" . }}
   labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
 ---
 apiVersion: policy/v1beta1
 kind: PodSecurityPolicy
 metadata:
   name: {{ template "pushProxy.proxy.name" . }}
-  namespace: {{ .Release.Namespace }}
+  namespace: {{ include "pushprox.namespace" . }}
   labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }}
 spec:
   privileged: false
diff --git a/packages/rancher-pushprox/charts/values.yaml b/packages/rancher-pushprox/charts/values.yaml
index 5459dcdc0..6ad1eab4d 100644
--- a/packages/rancher-pushprox/charts/values.yaml
+++ b/packages/rancher-pushprox/charts/values.yaml
@@ -16,6 +16,8 @@ global:
   cattle:
     systemDefaultRegistry: ""

+namespaceOverride: ""
+
 # The component that is being monitored (i.e. etcd)
 component: "component"

@@ -69,6 +71,7 @@ clients:
   # Options to select all nodes to deploy client DaemonSet on
   nodeSelector: {}
   tolerations: []
+  affinity: {}

   image:
     repository: rancher/pushprox-client
@@ -79,6 +82,17 @@ clients:
     repository: rancher/mirrored-library-busybox
     tag: 1.31.1

+  # The default intention of rancher-pushprox clients is to scrape hostNetwork metrics across all nodes.
+  # This can be used to scrape internal Kubernetes components or DaemonSets of hostNetwork Pods in
+  # situations where a cloud provider firewall prevents Pod-To-Host communication but not Pod-To-Pod.
+  # However, if the underlying hostNetwork Pod that is being scraped is managed by a Deployment,
+  # this advanced option enables users to deploy the client as a Deployment instead of a DaemonSet.
+  # If a user deploys this feature and the underlying Deployment's number of replicas changes, the user will
+  # be responsible for upgrading this chart accordingly with the right number of replicas.
+  deployment:
+    enabled: false
+    replicas: 0
+
 proxy:
   enabled: true
   # The port through which PushProx clients will communicate to the proxy
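
For reviewers who want to see the new options exercised together, here is a minimal, illustrative Helm values override. It is not part of the diff: the file name `rke2-ingress-nginx-values.yaml` is hypothetical, and the keys simply mirror the `rke2IngressNginx` defaults and the `clients.deployment` option added above. Note that the new guard in the exporter templates fails rendering if `rkeIngressNginx.enabled=true` is combined with `ingressNginx.enabled=false`.

```yaml
# rke2-ingress-nginx-values.yaml (hypothetical override file for rancher-monitoring)
# Sketch only: keys mirror the defaults introduced in this change.
rke2IngressNginx:
  # Turn on PushProx-based scraping of the RKE2 ingress-nginx-controller.
  enabled: true
  clients:
    # On RKE2 <= 1.20 the controller runs as a Deployment with a single pod,
    # so the PushProx client is also run as a Deployment that follows it.
    deployment:
      enabled: true
      replicas: 1
```

An equivalent sketch for RKE clusters would set `rkeIngressNginx.enabled: true` instead, while leaving `ingressNginx.enabled` on so the guard does not trigger.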