From ba8228500e2a588e72376880284f4b95f5c7425b Mon Sep 17 00:00:00 2001 From: Arvind Iyengar Date: Fri, 18 Jun 2021 14:06:58 -0700 Subject: [PATCH] make validate --- assets/fleet-agent/fleet-agent-0.3.500.tgz | Bin 0 -> 2329 bytes assets/fleet-crd/fleet-crd-0.3.500.tgz | Bin 0 -> 10057 bytes assets/fleet/fleet-0.3.500.tgz | Bin 0 -> 3029 bytes assets/longhorn/longhorn-1.1.100.tgz | Bin 0 -> 15802 bytes assets/longhorn/longhorn-crd-1.1.100.tgz | Bin 0 -> 1807 bytes .../rancher-alerting-drivers-1.0.100.tgz | Bin 0 -> 7357 bytes .../rancher-backup-crd-1.0.400.tgz | Bin 0 -> 1688 bytes .../rancher-backup/rancher-backup-1.0.400.tgz | Bin 0 -> 5742 bytes .../rancher-cis-benchmark-1.0.400.tgz | Bin 0 -> 5079 bytes .../rancher-cis-benchmark-crd-1.0.400.tgz | Bin 0 -> 1450 bytes .../rancher-gatekeeper-3.3.001.tgz | Bin 0 -> 7398 bytes .../rancher-gatekeeper-crd-3.3.001.tgz | Bin 0 -> 3683 bytes .../rancher-grafana-6.6.401.tgz | Bin 0 -> 27606 bytes .../rancher-istio-1.8.500.tgz | Bin 0 -> 19491 bytes .../rancher-istio-1.8.600.tgz | Bin 0 -> 19491 bytes .../rancher-istio-1.9.300.tgz | Bin 0 -> 19318 bytes .../rancher-istio-1.9.500.tgz | Bin 0 -> 19316 bytes .../rancher-kiali-server-1.32.100.tgz | Bin 0 -> 10200 bytes .../rancher-kiali-server-crd-1.32.100.tgz | Bin 0 -> 614 bytes .../rancher-kube-state-metrics-2.13.101.tgz | Bin 0 -> 11616 bytes .../rancher-logging-3.9.400.tgz | Bin 0 -> 10374 bytes .../rancher-logging-crd-3.9.400.tgz | Bin 0 -> 36673 bytes .../rancher-monitoring-14.5.100.tgz | Bin 0 -> 274062 bytes .../rancher-monitoring-crd-14.5.100.tgz | Bin 0 -> 117484 bytes .../rancher-node-exporter-1.16.201.tgz | Bin 0 -> 6662 bytes .../rancher-operator-crd-0.1.400.tgz | Bin 0 -> 8647 bytes .../rancher-operator-0.1.400.tgz | Bin 0 -> 1088 bytes .../rancher-prom2teams-0.2.000.tgz | Bin 0 -> 4289 bytes .../rancher-prometheus-adapter-2.12.101.tgz | Bin 0 -> 8357 bytes .../rancher-pushprox-0.1.300.tgz | Bin 0 -> 6318 bytes .../rancher-sachet/rancher-sachet-1.0.100.tgz | Bin 
0 -> 3602 bytes .../rancher-tracing-1.20.100.tgz | Bin 0 -> 3691 bytes .../rancher-webhook-0.1.000.tgz | Bin 0 -> 1210 bytes .../rancher-windows-exporter-0.1.000.tgz | Bin 0 -> 5418 bytes .../rancher-wins-upgrader-0.0.100.tgz | Bin 0 -> 5850 bytes .../fleet-agent/0.3.500/Chart.yaml | 12 + .../0.3.500/templates/_helpers.tpl | 7 + .../0.3.500/templates/configmap.yaml | 12 + .../0.3.500/templates/deployment.yaml | 30 + .../templates/network_policy_allow_all.yaml | 15 + .../patch_default_serviceaccount.yaml | 28 + .../fleet-agent/0.3.500/templates/rbac.yaml | 25 + .../fleet-agent/0.3.500/templates/secret.yaml | 10 + .../0.3.500/templates/serviceaccount.yaml | 4 + .../0.3.500/templates/validate.yaml | 11 + .../fleet-agent/0.3.500/values.yaml | 57 + charts/fleet-crd/fleet-crd/0.3.500/Chart.yaml | 12 + .../fleet-crd/0.3.500/templates/crds.yaml | 2316 ++++++ .../0.3.500/templates/gitjobs-crds.yaml | 3208 ++++++++ charts/fleet/fleet/0.3.500/Chart.yaml | 15 + .../fleet/0.3.500/charts/gitjob/.helmignore | 23 + .../fleet/0.3.500/charts/gitjob/Chart.yaml | 5 + .../charts/gitjob/templates/_helpers.tpl | 7 + .../charts/gitjob/templates/clusterrole.yaml | 38 + .../gitjob/templates/clusterrolebinding.yaml | 12 + .../charts/gitjob/templates/deployment.yaml | 42 + .../charts/gitjob/templates/service.yaml | 12 + .../gitjob/templates/serviceaccount.yaml | 4 + .../fleet/0.3.500/charts/gitjob/values.yaml | 26 + .../fleet/0.3.500/templates/_helpers.tpl | 7 + .../fleet/0.3.500/templates/configmap.yaml | 23 + .../fleet/0.3.500/templates/deployment.yaml | 39 + .../fleet/fleet/0.3.500/templates/rbac.yaml | 106 + .../0.3.500/templates/serviceaccount.yaml | 10 + charts/fleet/fleet/0.3.500/values.yaml | 53 + .../longhorn/longhorn-crd/1.1.100/Chart.yaml | 10 + .../longhorn/longhorn-crd/1.1.100/README.md | 2 + .../longhorn-crd/1.1.100/templates/crds.yaml | 524 ++ charts/longhorn/longhorn/1.1.100/.helmignore | 21 + charts/longhorn/longhorn/1.1.100/Chart.yaml | 37 + 
charts/longhorn/longhorn/1.1.100/README.md | 32 + .../longhorn/longhorn/1.1.100/app-readme.md | 11 + .../longhorn/longhorn/1.1.100/questions.yml | 542 ++ .../longhorn/1.1.100/templates/NOTES.txt | 5 + .../longhorn/1.1.100/templates/_helpers.tpl | 66 + .../1.1.100/templates/clusterrole.yaml | 47 + .../1.1.100/templates/clusterrolebinding.yaml | 13 + .../1.1.100/templates/daemonset-sa.yaml | 125 + .../1.1.100/templates/default-setting.yaml | 43 + .../1.1.100/templates/deployment-driver.yaml | 104 + .../1.1.100/templates/deployment-ui.yaml | 72 + .../longhorn/1.1.100/templates/ingress.yaml | 34 + .../1.1.100/templates/postupgrade-job.yaml | 48 + .../longhorn/1.1.100/templates/psp.yaml | 66 + .../1.1.100/templates/registry-secret.yml | 11 + .../1.1.100/templates/serviceaccount.yaml | 6 + .../1.1.100/templates/storageclass.yaml | 26 + .../1.1.100/templates/tls-secrets.yaml | 16 + .../1.1.100/templates/uninstall-job.yaml | 49 + .../longhorn/1.1.100/templates/userroles.yaml | 38 + .../templates/validate-install-crd.yaml | 23 + charts/longhorn/longhorn/1.1.100/values.yaml | 220 + .../1.0.100/Chart.yaml | 21 + .../1.0.100/app-readme.md | 11 + .../1.0.100/charts/prom2teams/.helmignore | 22 + .../1.0.100/charts/prom2teams/Chart.yaml | 10 + .../1.0.100/charts/prom2teams/files/teams.j2 | 44 + .../charts/prom2teams/templates/NOTES.txt | 2 + .../charts/prom2teams/templates/_helpers.tpl | 73 + .../prom2teams/templates/configmap.yaml | 39 + .../prom2teams/templates/deployment.yaml | 77 + .../charts/prom2teams/templates/psp.yaml | 28 + .../charts/prom2teams/templates/role.yaml | 15 + .../prom2teams/templates/rolebinding.yaml | 13 + .../prom2teams/templates/service-account.yaml | 6 + .../charts/prom2teams/templates/service.yaml | 17 + .../1.0.100/charts/prom2teams/values.yaml | 62 + .../1.0.100/charts/sachet/.helmignore | 23 + .../1.0.100/charts/sachet/Chart.yaml | 11 + .../1.0.100/charts/sachet/files/template.tmpl | 1 + .../1.0.100/charts/sachet/templates/NOTES.txt | 3 + 
.../charts/sachet/templates/_helpers.tpl | 79 + .../templates/configmap-pre-install.yaml | 34 + .../charts/sachet/templates/deployment.yaml | 75 + .../1.0.100/charts/sachet/templates/psp.yaml | 28 + .../1.0.100/charts/sachet/templates/role.yaml | 15 + .../charts/sachet/templates/rolebinding.yaml | 13 + .../sachet/templates/service-account.yaml | 6 + .../charts/sachet/templates/service.yaml | 17 + .../1.0.100/charts/sachet/values.yaml | 63 + .../1.0.100/questions.yml | 14 + .../1.0.100/templates/NOTES.txt | 2 + .../1.0.100/templates/_helpers.tpl | 91 + .../1.0.100/templates/cluster-role.yaml | 50 + .../1.0.100/values.yaml | 17 + .../rancher-backup-crd/1.0.400/Chart.yaml | 11 + .../rancher-backup-crd/1.0.400/README.md | 3 + .../1.0.400/templates/backup.yaml | 119 + .../1.0.400/templates/resourceset.yaml | 94 + .../1.0.400/templates/restore.yaml | 102 + .../rancher-backup/1.0.400/Chart.yaml | 20 + .../rancher-backup/1.0.400/README.md | 69 + .../rancher-backup/1.0.400/app-readme.md | 15 + .../default-resourceset-contents/eks.yaml | 17 + .../default-resourceset-contents/fleet.yaml | 49 + .../default-resourceset-contents/gke.yaml | 17 + .../rancher-operator.yaml | 27 + .../default-resourceset-contents/rancher.yaml | 44 + .../1.0.400/templates/_helpers.tpl | 76 + .../1.0.400/templates/clusterrolebinding.yaml | 14 + .../1.0.400/templates/deployment.yaml | 59 + .../rancher-backup/1.0.400/templates/pvc.yaml | 27 + .../templates/rancher-resourceset.yaml | 13 + .../1.0.400/templates/s3-secret.yaml | 31 + .../1.0.400/templates/serviceaccount.yaml | 7 + .../templates/validate-install-crd.yaml | 16 + .../rancher-backup/1.0.400/values.yaml | 49 + .../1.0.400/Chart.yaml | 10 + .../1.0.400/README.md | 2 + .../1.0.400/templates/clusterscan.yaml | 149 + .../templates/clusterscanbenchmark.yaml | 55 + .../1.0.400/templates/clusterscanprofile.yaml | 37 + .../1.0.400/templates/clusterscanreport.yaml | 40 + .../rancher-cis-benchmark/1.0.400/Chart.yaml | 18 + 
.../rancher-cis-benchmark/1.0.400/README.md | 9 + .../1.0.400/app-readme.md | 15 + .../1.0.400/templates/_helpers.tpl | 23 + .../1.0.400/templates/alertingrule.yaml | 14 + .../1.0.400/templates/benchmark-cis-1.5.yaml | 8 + .../1.0.400/templates/benchmark-cis-1.6.yaml | 8 + .../1.0.400/templates/benchmark-eks-1.0.yaml | 8 + .../1.0.400/templates/benchmark-gke-1.0.yaml | 8 + .../benchmark-k3s-cis-1.6-hardened.yaml | 8 + .../benchmark-k3s-cis-1.6-permissive.yaml | 8 + .../benchmark-rke-cis-1.5-hardened.yaml | 8 + .../benchmark-rke-cis-1.5-permissive.yaml | 8 + .../benchmark-rke-cis-1.6-hardened.yaml | 8 + .../benchmark-rke-cis-1.6-permissive.yaml | 8 + .../benchmark-rke2-cis-1.5-hardened.yaml | 8 + .../benchmark-rke2-cis-1.5-permissive.yaml | 8 + .../benchmark-rke2-cis-1.6-hardened.yaml | 8 + .../benchmark-rke2-cis-1.6-permissive.yaml | 8 + .../1.0.400/templates/cis-roles.yaml | 49 + .../1.0.400/templates/configmap.yaml | 17 + .../1.0.400/templates/deployment.yaml | 57 + .../templates/network_policy_allow_all.yaml | 15 + .../patch_default_serviceaccount.yaml | 20 + .../1.0.400/templates/rbac.yaml | 43 + .../1.0.400/templates/scanprofile-cis-1.5.yml | 9 + .../templates/scanprofile-cis-1.6.yaml | 9 + .../scanprofile-k3s-cis-1.6-hardened.yml | 9 + .../scanprofile-k3s-cis-1.6-permissive.yml | 9 + .../scanprofile-rke-1.5-hardened.yml | 9 + .../scanprofile-rke-1.5-permissive.yml | 9 + .../scanprofile-rke-1.6-hardened.yaml | 9 + .../scanprofile-rke-1.6-permissive.yaml | 9 + .../scanprofile-rke2-cis-1.5-hardened.yml | 9 + .../scanprofile-rke2-cis-1.5-permissive.yml | 9 + .../scanprofile-rke2-cis-1.6-hardened.yml | 9 + .../scanprofile-rke2-cis-1.6-permissive.yml | 9 + .../1.0.400/templates/scanprofileeks.yml | 9 + .../1.0.400/templates/scanprofilegke.yml | 9 + .../1.0.400/templates/serviceaccount.yaml | 14 + .../templates/validate-install-crd.yaml | 17 + .../rancher-cis-benchmark/1.0.400/values.yaml | 45 + .../rancher-gatekeeper-crd/3.3.001/Chart.yaml | 10 + 
.../rancher-gatekeeper-crd/3.3.001/README.md | 2 + .../config-customresourcedefinition.yaml | 106 + ...intpodstatus-customresourcedefinition.yaml | 68 + ...ainttemplate-customresourcedefinition.yaml | 97 + ...atepodstatus-customresourcedefinition.yaml | 67 + .../3.3.001/templates/_helpers.tpl | 7 + .../3.3.001/templates/jobs.yaml | 92 + .../3.3.001/templates/manifest.yaml | 14 + .../3.3.001/templates/rbac.yaml | 72 + .../3.3.001/values.yaml | 11 + .../rancher-gatekeeper/3.3.001/.helmignore | 21 + .../rancher-gatekeeper/3.3.001/CHANGELOG.md | 15 + .../rancher-gatekeeper/3.3.001/Chart.yaml | 22 + .../rancher-gatekeeper/3.3.001/README.md | 39 + .../rancher-gatekeeper/3.3.001/app-readme.md | 14 + .../3.3.001/templates/_helpers.tpl | 52 + .../3.3.001/templates/allowedrepos.yaml | 35 + .../gatekeeper-admin-podsecuritypolicy.yaml | 35 + .../gatekeeper-admin-serviceaccount.yaml | 11 + .../gatekeeper-audit-deployment.yaml | 103 + ...ekeeper-controller-manager-deployment.yaml | 124 + .../gatekeeper-manager-role-clusterrole.yaml | 139 + .../gatekeeper-manager-role-role.yaml | 32 + ...anager-rolebinding-clusterrolebinding.yaml | 18 + ...eeper-manager-rolebinding-rolebinding.yaml | 19 + ...ration-validatingwebhookconfiguration.yaml | 61 + ...gatekeeper-webhook-server-cert-secret.yaml | 13 + .../gatekeeper-webhook-service-service.yaml | 23 + .../3.3.001/templates/requiredlabels.yaml | 57 + .../templates/validate-install-crd.yaml | 17 + .../rancher-gatekeeper/3.3.001/values.yaml | 60 + .../rancher-grafana/6.6.401/.helmignore | 23 + .../rancher-grafana/6.6.401/Chart.yaml | 28 + .../rancher-grafana/6.6.401/README.md | 514 ++ .../6.6.401/dashboards/custom-dashboard.json | 1 + .../6.6.401/templates/NOTES.txt | 54 + .../6.6.401/templates/_helpers.tpl | 145 + .../6.6.401/templates/_pod.tpl | 496 ++ .../6.6.401/templates/clusterrole.yaml | 25 + .../6.6.401/templates/clusterrolebinding.yaml | 24 + .../configmap-dashboard-provider.yaml | 29 + .../6.6.401/templates/configmap.yaml | 80 + 
.../templates/dashboards-json-configmap.yaml | 35 + .../6.6.401/templates/deployment.yaml | 48 + .../6.6.401/templates/headless-service.yaml | 18 + .../templates/image-renderer-deployment.yaml | 117 + .../image-renderer-network-policy.yaml | 76 + .../templates/image-renderer-service.yaml | 28 + .../6.6.401/templates/ingress.yaml | 80 + .../6.6.401/templates/nginx-config.yaml | 75 + .../templates/poddisruptionbudget.yaml | 22 + .../6.6.401/templates/podsecuritypolicy.yaml | 49 + .../6.6.401/templates/pvc.yaml | 33 + .../6.6.401/templates/role.yaml | 32 + .../6.6.401/templates/rolebinding.yaml | 25 + .../6.6.401/templates/secret-env.yaml | 14 + .../6.6.401/templates/secret.yaml | 22 + .../6.6.401/templates/service.yaml | 50 + .../6.6.401/templates/serviceaccount.yaml | 13 + .../6.6.401/templates/servicemonitor.yaml | 40 + .../6.6.401/templates/statefulset.yaml | 52 + .../templates/tests/test-configmap.yaml | 17 + .../tests/test-podsecuritypolicy.yaml | 30 + .../6.6.401/templates/tests/test-role.yaml | 14 + .../templates/tests/test-rolebinding.yaml | 17 + .../templates/tests/test-serviceaccount.yaml | 9 + .../6.6.401/templates/tests/test.yaml | 48 + .../rancher-grafana/6.6.401/values.yaml | 732 ++ .../rancher-istio/1.8.500/Chart.yaml | 21 + .../rancher-istio/1.8.500/README.md | 69 + .../rancher-istio/1.8.500/app-readme.md | 45 + .../1.8.500/charts/kiali/Chart.yaml | 31 + .../1.8.500/charts/kiali/templates/NOTES.txt | 5 + .../charts/kiali/templates/_helpers.tpl | 192 + .../charts/kiali/templates/cabundle.yaml | 13 + .../charts/kiali/templates/configmap.yaml | 24 + .../kiali/templates/dashboards/envoy.yaml | 56 + .../charts/kiali/templates/dashboards/go.yaml | 67 + .../kiali/templates/dashboards/kiali.yaml | 44 + .../dashboards/micrometer-1.0.6-jvm-pool.yaml | 43 + .../dashboards/micrometer-1.0.6-jvm.yaml | 65 + .../dashboards/micrometer-1.1-jvm.yaml | 68 + .../dashboards/microprofile-1.1.yaml | 59 + .../dashboards/microprofile-x.y.yaml | 38 + 
.../kiali/templates/dashboards/nodejs.yaml | 59 + .../kiali/templates/dashboards/quarkus.yaml | 33 + .../dashboards/springboot-jvm-pool.yaml | 16 + .../templates/dashboards/springboot-jvm.yaml | 16 + .../dashboards/springboot-tomcat.yaml | 16 + .../kiali/templates/dashboards/thorntail.yaml | 22 + .../kiali/templates/dashboards/tomcat.yaml | 67 + .../templates/dashboards/vertx-client.yaml | 60 + .../templates/dashboards/vertx-eventbus.yaml | 59 + .../kiali/templates/dashboards/vertx-jvm.yaml | 16 + .../templates/dashboards/vertx-pool.yaml | 68 + .../templates/dashboards/vertx-server.yaml | 62 + .../charts/kiali/templates/deployment.yaml | 174 + .../1.8.500/charts/kiali/templates/hpa.yaml | 17 + .../charts/kiali/templates/ingress.yaml | 40 + .../1.8.500/charts/kiali/templates/oauth.yaml | 17 + .../1.8.500/charts/kiali/templates/psp.yaml | 67 + .../kiali/templates/role-controlplane.yaml | 15 + .../charts/kiali/templates/role-viewer.yaml | 97 + .../1.8.500/charts/kiali/templates/role.yaml | 108 + .../templates/rolebinding-controlplane.yaml | 17 + .../charts/kiali/templates/rolebinding.yaml | 20 + .../1.8.500/charts/kiali/templates/route.yaml | 30 + .../charts/kiali/templates/service.yaml | 47 + .../kiali/templates/serviceaccount.yaml | 9 + .../kiali/templates/validate-install-crd.yaml | 14 + .../kiali/templates/web-root-configmap.yaml | 12 + .../1.8.500/charts/kiali/values.yaml | 93 + .../1.8.500/charts/tracing/.helmignore | 23 + .../1.8.500/charts/tracing/Chart.yaml | 12 + .../1.8.500/charts/tracing/README.md | 5 + .../charts/tracing/templates/_affinity.tpl | 92 + .../charts/tracing/templates/_helpers.tpl | 32 + .../charts/tracing/templates/deployment.yaml | 86 + .../1.8.500/charts/tracing/templates/psp.yaml | 86 + .../1.8.500/charts/tracing/templates/pvc.yaml | 16 + .../charts/tracing/templates/service.yaml | 63 + .../1.8.500/charts/tracing/values.yaml | 44 + .../1.8.500/configs/istio-base.yaml | 89 + .../rancher-istio/1.8.500/requirements.yaml | 17 + 
.../1.8.500/samples/overlay-example.yaml | 37 + .../1.8.500/templates/_helpers.tpl | 12 + .../1.8.500/templates/admin-role.yaml | 43 + .../1.8.500/templates/base-config-map.yaml | 7 + .../1.8.500/templates/clusterrole.yaml | 120 + .../1.8.500/templates/clusterrolebinding.yaml | 12 + .../1.8.500/templates/edit-role.yaml | 43 + .../1.8.500/templates/istio-cni-psp.yaml | 51 + .../1.8.500/templates/istio-install-job.yaml | 50 + .../1.8.500/templates/istio-install-psp.yaml | 30 + .../1.8.500/templates/istio-psp.yaml | 81 + .../templates/istio-uninstall-job.yaml | 45 + .../1.8.500/templates/overlay-config-map.yaml | 9 + .../1.8.500/templates/service-monitors.yaml | 51 + .../1.8.500/templates/serviceaccount.yaml | 5 + .../1.8.500/templates/view-role.yaml | 41 + .../rancher-istio/1.8.500/values.yaml | 95 + .../rancher-istio/1.8.600/Chart.yaml | 21 + .../rancher-istio/1.8.600/README.md | 69 + .../rancher-istio/1.8.600/app-readme.md | 45 + .../1.8.600/charts/kiali/Chart.yaml | 31 + .../1.8.600/charts/kiali/templates/NOTES.txt | 5 + .../charts/kiali/templates/_helpers.tpl | 192 + .../charts/kiali/templates/cabundle.yaml | 13 + .../charts/kiali/templates/configmap.yaml | 24 + .../kiali/templates/dashboards/envoy.yaml | 56 + .../charts/kiali/templates/dashboards/go.yaml | 67 + .../kiali/templates/dashboards/kiali.yaml | 44 + .../dashboards/micrometer-1.0.6-jvm-pool.yaml | 43 + .../dashboards/micrometer-1.0.6-jvm.yaml | 65 + .../dashboards/micrometer-1.1-jvm.yaml | 68 + .../dashboards/microprofile-1.1.yaml | 59 + .../dashboards/microprofile-x.y.yaml | 38 + .../kiali/templates/dashboards/nodejs.yaml | 59 + .../kiali/templates/dashboards/quarkus.yaml | 33 + .../dashboards/springboot-jvm-pool.yaml | 16 + .../templates/dashboards/springboot-jvm.yaml | 16 + .../dashboards/springboot-tomcat.yaml | 16 + .../kiali/templates/dashboards/thorntail.yaml | 22 + .../kiali/templates/dashboards/tomcat.yaml | 67 + .../templates/dashboards/vertx-client.yaml | 60 + 
.../templates/dashboards/vertx-eventbus.yaml | 59 + .../kiali/templates/dashboards/vertx-jvm.yaml | 16 + .../templates/dashboards/vertx-pool.yaml | 68 + .../templates/dashboards/vertx-server.yaml | 62 + .../charts/kiali/templates/deployment.yaml | 174 + .../1.8.600/charts/kiali/templates/hpa.yaml | 17 + .../charts/kiali/templates/ingress.yaml | 40 + .../1.8.600/charts/kiali/templates/oauth.yaml | 17 + .../1.8.600/charts/kiali/templates/psp.yaml | 67 + .../kiali/templates/role-controlplane.yaml | 15 + .../charts/kiali/templates/role-viewer.yaml | 97 + .../1.8.600/charts/kiali/templates/role.yaml | 108 + .../templates/rolebinding-controlplane.yaml | 17 + .../charts/kiali/templates/rolebinding.yaml | 20 + .../1.8.600/charts/kiali/templates/route.yaml | 30 + .../charts/kiali/templates/service.yaml | 47 + .../kiali/templates/serviceaccount.yaml | 9 + .../kiali/templates/validate-install-crd.yaml | 14 + .../kiali/templates/web-root-configmap.yaml | 12 + .../1.8.600/charts/kiali/values.yaml | 93 + .../1.8.600/charts/tracing/.helmignore | 23 + .../1.8.600/charts/tracing/Chart.yaml | 12 + .../1.8.600/charts/tracing/README.md | 5 + .../charts/tracing/templates/_affinity.tpl | 92 + .../charts/tracing/templates/_helpers.tpl | 32 + .../charts/tracing/templates/deployment.yaml | 86 + .../1.8.600/charts/tracing/templates/psp.yaml | 86 + .../1.8.600/charts/tracing/templates/pvc.yaml | 16 + .../charts/tracing/templates/service.yaml | 63 + .../1.8.600/charts/tracing/values.yaml | 44 + .../1.8.600/configs/istio-base.yaml | 89 + .../rancher-istio/1.8.600/requirements.yaml | 17 + .../1.8.600/samples/overlay-example.yaml | 37 + .../1.8.600/templates/_helpers.tpl | 12 + .../1.8.600/templates/admin-role.yaml | 43 + .../1.8.600/templates/base-config-map.yaml | 7 + .../1.8.600/templates/clusterrole.yaml | 120 + .../1.8.600/templates/clusterrolebinding.yaml | 12 + .../1.8.600/templates/edit-role.yaml | 43 + .../1.8.600/templates/istio-cni-psp.yaml | 51 + 
.../1.8.600/templates/istio-install-job.yaml | 50 + .../1.8.600/templates/istio-install-psp.yaml | 30 + .../1.8.600/templates/istio-psp.yaml | 81 + .../templates/istio-uninstall-job.yaml | 45 + .../1.8.600/templates/overlay-config-map.yaml | 9 + .../1.8.600/templates/service-monitors.yaml | 51 + .../1.8.600/templates/serviceaccount.yaml | 5 + .../1.8.600/templates/view-role.yaml | 41 + .../rancher-istio/1.8.600/values.yaml | 95 + .../rancher-istio/1.9.300/Chart.yaml | 21 + .../rancher-istio/1.9.300/README.md | 69 + .../rancher-istio/1.9.300/app-readme.md | 45 + .../1.9.300/charts/kiali/Chart.yaml | 31 + .../1.9.300/charts/kiali/templates/NOTES.txt | 5 + .../charts/kiali/templates/_helpers.tpl | 192 + .../charts/kiali/templates/cabundle.yaml | 13 + .../charts/kiali/templates/configmap.yaml | 24 + .../kiali/templates/dashboards/envoy.yaml | 56 + .../charts/kiali/templates/dashboards/go.yaml | 67 + .../kiali/templates/dashboards/kiali.yaml | 44 + .../dashboards/micrometer-1.0.6-jvm-pool.yaml | 43 + .../dashboards/micrometer-1.0.6-jvm.yaml | 65 + .../dashboards/micrometer-1.1-jvm.yaml | 68 + .../dashboards/microprofile-1.1.yaml | 59 + .../dashboards/microprofile-x.y.yaml | 38 + .../kiali/templates/dashboards/nodejs.yaml | 59 + .../kiali/templates/dashboards/quarkus.yaml | 33 + .../dashboards/springboot-jvm-pool.yaml | 16 + .../templates/dashboards/springboot-jvm.yaml | 16 + .../dashboards/springboot-tomcat.yaml | 16 + .../kiali/templates/dashboards/thorntail.yaml | 22 + .../kiali/templates/dashboards/tomcat.yaml | 67 + .../templates/dashboards/vertx-client.yaml | 60 + .../templates/dashboards/vertx-eventbus.yaml | 59 + .../kiali/templates/dashboards/vertx-jvm.yaml | 16 + .../templates/dashboards/vertx-pool.yaml | 68 + .../templates/dashboards/vertx-server.yaml | 62 + .../charts/kiali/templates/deployment.yaml | 174 + .../1.9.300/charts/kiali/templates/hpa.yaml | 17 + .../charts/kiali/templates/ingress.yaml | 40 + .../1.9.300/charts/kiali/templates/oauth.yaml | 17 + 
.../1.9.300/charts/kiali/templates/psp.yaml | 67 + .../kiali/templates/role-controlplane.yaml | 15 + .../charts/kiali/templates/role-viewer.yaml | 97 + .../1.9.300/charts/kiali/templates/role.yaml | 108 + .../templates/rolebinding-controlplane.yaml | 17 + .../charts/kiali/templates/rolebinding.yaml | 20 + .../1.9.300/charts/kiali/templates/route.yaml | 30 + .../charts/kiali/templates/service.yaml | 47 + .../kiali/templates/serviceaccount.yaml | 9 + .../kiali/templates/validate-install-crd.yaml | 14 + .../kiali/templates/web-root-configmap.yaml | 12 + .../1.9.300/charts/kiali/values.yaml | 93 + .../1.9.300/charts/tracing/.helmignore | 23 + .../1.9.300/charts/tracing/Chart.yaml | 12 + .../1.9.300/charts/tracing/README.md | 5 + .../charts/tracing/templates/_affinity.tpl | 92 + .../charts/tracing/templates/_helpers.tpl | 32 + .../charts/tracing/templates/deployment.yaml | 86 + .../1.9.300/charts/tracing/templates/psp.yaml | 86 + .../1.9.300/charts/tracing/templates/pvc.yaml | 16 + .../charts/tracing/templates/service.yaml | 63 + .../1.9.300/charts/tracing/values.yaml | 44 + .../1.9.300/configs/istio-base.yaml | 82 + .../rancher-istio/1.9.300/requirements.yaml | 17 + .../1.9.300/samples/overlay-example.yaml | 37 + .../1.9.300/templates/_helpers.tpl | 12 + .../1.9.300/templates/admin-role.yaml | 43 + .../1.9.300/templates/base-config-map.yaml | 7 + .../1.9.300/templates/clusterrole.yaml | 120 + .../1.9.300/templates/clusterrolebinding.yaml | 12 + .../1.9.300/templates/edit-role.yaml | 43 + .../1.9.300/templates/istio-cni-psp.yaml | 51 + .../1.9.300/templates/istio-install-job.yaml | 50 + .../1.9.300/templates/istio-install-psp.yaml | 30 + .../1.9.300/templates/istio-psp.yaml | 81 + .../templates/istio-uninstall-job.yaml | 45 + .../1.9.300/templates/overlay-config-map.yaml | 9 + .../1.9.300/templates/service-monitors.yaml | 51 + .../1.9.300/templates/serviceaccount.yaml | 5 + .../1.9.300/templates/view-role.yaml | 41 + .../rancher-istio/1.9.300/values.yaml | 85 + 
.../rancher-istio/1.9.500/Chart.yaml | 21 + .../rancher-istio/1.9.500/README.md | 69 + .../rancher-istio/1.9.500/app-readme.md | 45 + .../1.9.500/charts/kiali/Chart.yaml | 31 + .../1.9.500/charts/kiali/templates/NOTES.txt | 5 + .../charts/kiali/templates/_helpers.tpl | 192 + .../charts/kiali/templates/cabundle.yaml | 13 + .../charts/kiali/templates/configmap.yaml | 24 + .../kiali/templates/dashboards/envoy.yaml | 56 + .../charts/kiali/templates/dashboards/go.yaml | 67 + .../kiali/templates/dashboards/kiali.yaml | 44 + .../dashboards/micrometer-1.0.6-jvm-pool.yaml | 43 + .../dashboards/micrometer-1.0.6-jvm.yaml | 65 + .../dashboards/micrometer-1.1-jvm.yaml | 68 + .../dashboards/microprofile-1.1.yaml | 59 + .../dashboards/microprofile-x.y.yaml | 38 + .../kiali/templates/dashboards/nodejs.yaml | 59 + .../kiali/templates/dashboards/quarkus.yaml | 33 + .../dashboards/springboot-jvm-pool.yaml | 16 + .../templates/dashboards/springboot-jvm.yaml | 16 + .../dashboards/springboot-tomcat.yaml | 16 + .../kiali/templates/dashboards/thorntail.yaml | 22 + .../kiali/templates/dashboards/tomcat.yaml | 67 + .../templates/dashboards/vertx-client.yaml | 60 + .../templates/dashboards/vertx-eventbus.yaml | 59 + .../kiali/templates/dashboards/vertx-jvm.yaml | 16 + .../templates/dashboards/vertx-pool.yaml | 68 + .../templates/dashboards/vertx-server.yaml | 62 + .../charts/kiali/templates/deployment.yaml | 174 + .../1.9.500/charts/kiali/templates/hpa.yaml | 17 + .../charts/kiali/templates/ingress.yaml | 40 + .../1.9.500/charts/kiali/templates/oauth.yaml | 17 + .../1.9.500/charts/kiali/templates/psp.yaml | 67 + .../kiali/templates/role-controlplane.yaml | 15 + .../charts/kiali/templates/role-viewer.yaml | 97 + .../1.9.500/charts/kiali/templates/role.yaml | 108 + .../templates/rolebinding-controlplane.yaml | 17 + .../charts/kiali/templates/rolebinding.yaml | 20 + .../1.9.500/charts/kiali/templates/route.yaml | 30 + .../charts/kiali/templates/service.yaml | 47 + 
.../kiali/templates/serviceaccount.yaml | 9 + .../kiali/templates/validate-install-crd.yaml | 14 + .../kiali/templates/web-root-configmap.yaml | 12 + .../1.9.500/charts/kiali/values.yaml | 93 + .../1.9.500/charts/tracing/.helmignore | 23 + .../1.9.500/charts/tracing/Chart.yaml | 12 + .../1.9.500/charts/tracing/README.md | 5 + .../charts/tracing/templates/_affinity.tpl | 92 + .../charts/tracing/templates/_helpers.tpl | 32 + .../charts/tracing/templates/deployment.yaml | 86 + .../1.9.500/charts/tracing/templates/psp.yaml | 86 + .../1.9.500/charts/tracing/templates/pvc.yaml | 16 + .../charts/tracing/templates/service.yaml | 63 + .../1.9.500/charts/tracing/values.yaml | 44 + .../1.9.500/configs/istio-base.yaml | 82 + .../rancher-istio/1.9.500/requirements.yaml | 17 + .../1.9.500/samples/overlay-example.yaml | 37 + .../1.9.500/templates/_helpers.tpl | 12 + .../1.9.500/templates/admin-role.yaml | 43 + .../1.9.500/templates/base-config-map.yaml | 7 + .../1.9.500/templates/clusterrole.yaml | 120 + .../1.9.500/templates/clusterrolebinding.yaml | 12 + .../1.9.500/templates/edit-role.yaml | 43 + .../1.9.500/templates/istio-cni-psp.yaml | 51 + .../1.9.500/templates/istio-install-job.yaml | 50 + .../1.9.500/templates/istio-install-psp.yaml | 30 + .../1.9.500/templates/istio-psp.yaml | 81 + .../templates/istio-uninstall-job.yaml | 45 + .../1.9.500/templates/overlay-config-map.yaml | 9 + .../1.9.500/templates/service-monitors.yaml | 51 + .../1.9.500/templates/serviceaccount.yaml | 5 + .../1.9.500/templates/view-role.yaml | 41 + .../rancher-istio/1.9.500/values.yaml | 85 + .../1.32.100/Chart.yaml | 7 + .../1.32.100/README.md | 2 + .../1.32.100/templates/crds.yaml | 22 + .../rancher-kiali-server/1.32.100/Chart.yaml | 31 + .../1.32.100/templates/NOTES.txt | 5 + .../1.32.100/templates/_helpers.tpl | 192 + .../1.32.100/templates/cabundle.yaml | 13 + .../1.32.100/templates/configmap.yaml | 24 + .../1.32.100/templates/dashboards/envoy.yaml | 56 + 
.../1.32.100/templates/dashboards/go.yaml | 67 + .../1.32.100/templates/dashboards/kiali.yaml | 44 + .../dashboards/micrometer-1.0.6-jvm-pool.yaml | 43 + .../dashboards/micrometer-1.0.6-jvm.yaml | 65 + .../dashboards/micrometer-1.1-jvm.yaml | 68 + .../dashboards/microprofile-1.1.yaml | 59 + .../dashboards/microprofile-x.y.yaml | 38 + .../1.32.100/templates/dashboards/nodejs.yaml | 59 + .../templates/dashboards/quarkus.yaml | 33 + .../dashboards/springboot-jvm-pool.yaml | 16 + .../templates/dashboards/springboot-jvm.yaml | 16 + .../dashboards/springboot-tomcat.yaml | 16 + .../templates/dashboards/thorntail.yaml | 22 + .../1.32.100/templates/dashboards/tomcat.yaml | 67 + .../templates/dashboards/vertx-client.yaml | 60 + .../templates/dashboards/vertx-eventbus.yaml | 59 + .../templates/dashboards/vertx-jvm.yaml | 16 + .../templates/dashboards/vertx-pool.yaml | 68 + .../templates/dashboards/vertx-server.yaml | 62 + .../1.32.100/templates/deployment.yaml | 174 + .../1.32.100/templates/hpa.yaml | 17 + .../1.32.100/templates/ingress.yaml | 40 + .../1.32.100/templates/oauth.yaml | 17 + .../1.32.100/templates/psp.yaml | 67 + .../1.32.100/templates/role-controlplane.yaml | 15 + .../1.32.100/templates/role-viewer.yaml | 97 + .../1.32.100/templates/role.yaml | 108 + .../templates/rolebinding-controlplane.yaml | 17 + .../1.32.100/templates/rolebinding.yaml | 20 + .../1.32.100/templates/route.yaml | 30 + .../1.32.100/templates/service.yaml | 47 + .../1.32.100/templates/serviceaccount.yaml | 9 + .../templates/validate-install-crd.yaml | 14 + .../templates/web-root-configmap.yaml | 12 + .../rancher-kiali-server/1.32.100/values.yaml | 93 + .../2.13.101/.helmignore | 21 + .../2.13.101/Chart.yaml | 24 + .../2.13.101/LICENSE | 202 + .../2.13.101/OWNERS | 6 + .../2.13.101/README.md | 66 + .../2.13.101/templates/NOTES.txt | 10 + .../2.13.101/templates/_helpers.tpl | 76 + .../templates/clusterrolebinding.yaml | 23 + .../2.13.101/templates/deployment.yaml | 217 + 
.../2.13.101/templates/kubeconfig-secret.yaml | 15 + .../2.13.101/templates/pdb.yaml | 20 + .../2.13.101/templates/podsecuritypolicy.yaml | 42 + .../2.13.101/templates/psp-clusterrole.yaml | 22 + .../templates/psp-clusterrolebinding.yaml | 19 + .../2.13.101/templates/role.yaml | 192 + .../2.13.101/templates/rolebinding.yaml | 27 + .../2.13.101/templates/service.yaml | 42 + .../2.13.101/templates/serviceaccount.yaml | 18 + .../2.13.101/templates/servicemonitor.yaml | 34 + .../2.13.101/templates/stsdiscovery-role.yaml | 29 + .../templates/stsdiscovery-rolebinding.yaml | 20 + .../2.13.101/values.yaml | 184 + .../rancher-logging-crd/3.9.400/Chart.yaml | 10 + .../rancher-logging-crd/3.9.400/README.md | 2 + .../logging.banzaicloud.io_clusterflows.yaml | 765 ++ ...logging.banzaicloud.io_clusteroutputs.yaml | 4721 +++++++++++ .../logging.banzaicloud.io_flows.yaml | 761 ++ .../logging.banzaicloud.io_loggings.yaml | 7095 +++++++++++++++++ .../logging.banzaicloud.io_outputs.yaml | 4715 +++++++++++ .../rancher-logging/3.9.400/.helmignore | 22 + .../rancher-logging/3.9.400/Chart.yaml | 19 + .../rancher-logging/3.9.400/README.md | 131 + .../rancher-logging/3.9.400/app-readme.md | 22 + .../3.9.400/templates/NOTES.txt | 0 .../3.9.400/templates/_helpers.tpl | 76 + .../3.9.400/templates/clusterrole.yaml | 167 + .../3.9.400/templates/clusterrolebinding.yaml | 18 + .../3.9.400/templates/crds.yaml | 6 + .../3.9.400/templates/deployment.yaml | 68 + .../templates/loggings/aks/logging.yaml | 58 + .../templates/loggings/eks/logging.yaml | 59 + .../templates/loggings/gke/logging.yaml | 58 + .../loggings/k3s/logging-k3s-openrc.yaml | 68 + .../loggings/k3s/logging-k3s-systemd.yaml | 68 + .../templates/loggings/rke/configmap.yaml | 29 + .../templates/loggings/rke/daemonset.yaml | 124 + .../templates/loggings/rke2/configmap.yaml | 22 + .../templates/loggings/rke2/daemonset.yaml | 110 + .../rke2/logging-rke2-containers.yaml | 73 + .../templates/loggings/root/logging.yaml | 111 + 
.../3.9.400/templates/psp.yaml | 33 + .../3.9.400/templates/service.yaml | 20 + .../3.9.400/templates/serviceMonitor.yaml | 30 + .../3.9.400/templates/serviceaccount.yaml | 10 + .../3.9.400/templates/userroles.yaml | 35 + .../templates/validate-install-crd.yaml | 18 + .../3.9.400/templates/validate-install.yaml | 5 + .../rancher-logging/3.9.400/values.yaml | 176 + .../14.5.100/Chart.yaml | 10 + .../rancher-monitoring-crd/14.5.100/README.md | 24 + .../crd-manifest/crd-alertmanagerconfigs.yaml | 1869 +++++ .../crd-manifest/crd-alertmanagers.yaml | 3218 ++++++++ .../crd-manifest/crd-podmonitors.yaml | 358 + .../14.5.100/crd-manifest/crd-probes.yaml | 202 + .../crd-manifest/crd-prometheuses.yaml | 4432 ++++++++++ .../crd-manifest/crd-prometheusrules.yaml | 90 + .../crd-manifest/crd-servicemonitors.yaml | 375 + .../crd-manifest/crd-thanosrulers.yaml | 3342 ++++++++ .../14.5.100/templates/_helpers.tpl | 29 + .../14.5.100/templates/jobs.yaml | 117 + .../14.5.100/templates/manifest.yaml | 14 + .../14.5.100/templates/rbac.yaml | 72 + .../14.5.100/values.yaml | 11 + .../rancher-monitoring/14.5.100/.helmignore | 26 + .../rancher-monitoring/14.5.100/CHANGELOG.md | 47 + .../14.5.100/CONTRIBUTING.md | 12 + .../rancher-monitoring/14.5.100/Chart.yaml | 103 + .../rancher-monitoring/14.5.100/README.md | 455 ++ .../rancher-monitoring/14.5.100/app-README.md | 15 + .../14.5.100/charts/grafana/.helmignore | 23 + .../14.5.100/charts/grafana/Chart.yaml | 28 + .../14.5.100/charts/grafana/README.md | 514 ++ .../grafana/dashboards/custom-dashboard.json | 1 + .../charts/grafana/templates/NOTES.txt | 54 + .../charts/grafana/templates/_helpers.tpl | 145 + .../charts/grafana/templates/_pod.tpl | 496 ++ .../charts/grafana/templates/clusterrole.yaml | 25 + .../grafana/templates/clusterrolebinding.yaml | 24 + .../configmap-dashboard-provider.yaml | 29 + .../charts/grafana/templates/configmap.yaml | 80 + .../templates/dashboards-json-configmap.yaml | 35 + .../charts/grafana/templates/deployment.yaml 
| 48 + .../grafana/templates/headless-service.yaml | 18 + .../templates/image-renderer-deployment.yaml | 117 + .../image-renderer-network-policy.yaml | 76 + .../templates/image-renderer-service.yaml | 28 + .../charts/grafana/templates/ingress.yaml | 80 + .../grafana/templates/nginx-config.yaml | 75 + .../templates/poddisruptionbudget.yaml | 22 + .../grafana/templates/podsecuritypolicy.yaml | 49 + .../charts/grafana/templates/pvc.yaml | 33 + .../charts/grafana/templates/role.yaml | 32 + .../charts/grafana/templates/rolebinding.yaml | 25 + .../charts/grafana/templates/secret-env.yaml | 14 + .../charts/grafana/templates/secret.yaml | 22 + .../charts/grafana/templates/service.yaml | 50 + .../grafana/templates/serviceaccount.yaml | 13 + .../grafana/templates/servicemonitor.yaml | 40 + .../charts/grafana/templates/statefulset.yaml | 52 + .../templates/tests/test-configmap.yaml | 17 + .../tests/test-podsecuritypolicy.yaml | 30 + .../grafana/templates/tests/test-role.yaml | 14 + .../templates/tests/test-rolebinding.yaml | 17 + .../templates/tests/test-serviceaccount.yaml | 9 + .../charts/grafana/templates/tests/test.yaml | 48 + .../14.5.100/charts/grafana/values.yaml | 732 ++ .../14.5.100/charts/k3sServer/.helmignore | 23 + .../14.5.100/charts/k3sServer/Chart.yaml | 13 + .../14.5.100/charts/k3sServer/README.md | 54 + .../charts/k3sServer/templates/_helpers.tpl | 87 + .../templates/pushprox-clients-rbac.yaml | 74 + .../k3sServer/templates/pushprox-clients.yaml | 135 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../k3sServer/templates/pushprox-proxy.yaml | 52 + .../templates/pushprox-servicemonitor.yaml | 39 + .../14.5.100/charts/k3sServer/values.yaml | 86 + .../charts/kube-state-metrics/.helmignore | 21 + .../charts/kube-state-metrics/Chart.yaml | 24 + .../charts/kube-state-metrics/LICENSE | 202 + .../charts/kube-state-metrics/README.md | 66 + .../kube-state-metrics/templates/NOTES.txt | 10 + .../kube-state-metrics/templates/_helpers.tpl | 76 + 
.../templates/clusterrolebinding.yaml | 23 + .../templates/deployment.yaml | 217 + .../templates/kubeconfig-secret.yaml | 15 + .../kube-state-metrics/templates/pdb.yaml | 20 + .../templates/podsecuritypolicy.yaml | 42 + .../templates/psp-clusterrole.yaml | 22 + .../templates/psp-clusterrolebinding.yaml | 19 + .../kube-state-metrics/templates/role.yaml | 192 + .../templates/rolebinding.yaml | 27 + .../kube-state-metrics/templates/service.yaml | 42 + .../templates/serviceaccount.yaml | 18 + .../templates/servicemonitor.yaml | 34 + .../templates/stsdiscovery-role.yaml | 29 + .../templates/stsdiscovery-rolebinding.yaml | 20 + .../charts/kube-state-metrics/values.yaml | 184 + .../kubeAdmControllerManager/.helmignore | 23 + .../kubeAdmControllerManager/Chart.yaml | 13 + .../charts/kubeAdmControllerManager/README.md | 54 + .../templates/_helpers.tpl | 87 + .../templates/pushprox-clients-rbac.yaml | 74 + .../templates/pushprox-clients.yaml | 135 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../templates/pushprox-proxy.yaml | 52 + .../templates/pushprox-servicemonitor.yaml | 39 + .../kubeAdmControllerManager/values.yaml | 86 + .../14.5.100/charts/kubeAdmEtcd/.helmignore | 23 + .../14.5.100/charts/kubeAdmEtcd/Chart.yaml | 13 + .../14.5.100/charts/kubeAdmEtcd/README.md | 54 + .../charts/kubeAdmEtcd/templates/_helpers.tpl | 87 + .../templates/pushprox-clients-rbac.yaml | 74 + .../templates/pushprox-clients.yaml | 135 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../kubeAdmEtcd/templates/pushprox-proxy.yaml | 52 + .../templates/pushprox-servicemonitor.yaml | 39 + .../14.5.100/charts/kubeAdmEtcd/values.yaml | 86 + .../14.5.100/charts/kubeAdmProxy/.helmignore | 23 + .../14.5.100/charts/kubeAdmProxy/Chart.yaml | 13 + .../14.5.100/charts/kubeAdmProxy/README.md | 54 + .../kubeAdmProxy/templates/_helpers.tpl | 87 + .../templates/pushprox-clients-rbac.yaml | 74 + .../templates/pushprox-clients.yaml | 135 + .../templates/pushprox-proxy-rbac.yaml | 63 + 
.../templates/pushprox-proxy.yaml | 52 + .../templates/pushprox-servicemonitor.yaml | 39 + .../14.5.100/charts/kubeAdmProxy/values.yaml | 86 + .../charts/kubeAdmScheduler/.helmignore | 23 + .../charts/kubeAdmScheduler/Chart.yaml | 13 + .../charts/kubeAdmScheduler/README.md | 54 + .../kubeAdmScheduler/templates/_helpers.tpl | 87 + .../templates/pushprox-clients-rbac.yaml | 74 + .../templates/pushprox-clients.yaml | 135 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../templates/pushprox-proxy.yaml | 52 + .../templates/pushprox-servicemonitor.yaml | 39 + .../charts/kubeAdmScheduler/values.yaml | 86 + .../charts/prometheus-adapter/.helmignore | 21 + .../charts/prometheus-adapter/Chart.yaml | 26 + .../charts/prometheus-adapter/README.md | 147 + .../prometheus-adapter/templates/NOTES.txt | 9 + .../prometheus-adapter/templates/_helpers.tpl | 72 + .../templates/certmanager.yaml | 48 + .../cluster-role-binding-auth-delegator.yaml | 19 + .../cluster-role-binding-resource-reader.yaml | 19 + .../cluster-role-resource-reader.yaml | 23 + .../templates/configmap.yaml | 96 + .../templates/custom-metrics-apiservice.yaml | 32 + ...stom-metrics-cluster-role-binding-hpa.yaml | 23 + .../custom-metrics-cluster-role.yaml | 16 + .../templates/deployment.yaml | 135 + .../external-metrics-apiservice.yaml | 32 + ...rnal-metrics-cluster-role-binding-hpa.yaml | 19 + .../external-metrics-cluster-role.yaml | 20 + .../prometheus-adapter/templates/pdb.yaml | 22 + .../prometheus-adapter/templates/psp.yaml | 68 + .../resource-metrics-apiservice.yaml | 32 + ...resource-metrics-cluster-role-binding.yaml | 19 + .../resource-metrics-cluster-role.yaml | 22 + .../templates/role-binding-auth-reader.yaml | 20 + .../prometheus-adapter/templates/secret.yaml | 15 + .../prometheus-adapter/templates/service.yaml | 22 + .../templates/serviceaccount.yaml | 12 + .../charts/prometheus-adapter/values.yaml | 180 + .../prometheus-node-exporter/.helmignore | 21 + .../prometheus-node-exporter/Chart.yaml | 23 + 
.../charts/prometheus-node-exporter/README.md | 63 + .../templates/NOTES.txt | 15 + .../templates/_helpers.tpl | 95 + .../templates/daemonset.yaml | 183 + .../templates/endpoints.yaml | 18 + .../templates/monitor.yaml | 32 + .../templates/psp-clusterrole.yaml | 15 + .../templates/psp-clusterrolebinding.yaml | 17 + .../templates/psp.yaml | 52 + .../templates/service.yaml | 23 + .../templates/serviceaccount.yaml | 18 + .../prometheus-node-exporter/values.yaml | 177 + .../charts/rke2ControllerManager/.helmignore | 23 + .../charts/rke2ControllerManager/Chart.yaml | 13 + .../charts/rke2ControllerManager/README.md | 54 + .../templates/_helpers.tpl | 87 + .../templates/pushprox-clients-rbac.yaml | 74 + .../templates/pushprox-clients.yaml | 135 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../templates/pushprox-proxy.yaml | 52 + .../templates/pushprox-servicemonitor.yaml | 39 + .../charts/rke2ControllerManager/values.yaml | 86 + .../14.5.100/charts/rke2Etcd/.helmignore | 23 + .../14.5.100/charts/rke2Etcd/Chart.yaml | 13 + .../14.5.100/charts/rke2Etcd/README.md | 54 + .../charts/rke2Etcd/templates/_helpers.tpl | 87 + .../templates/pushprox-clients-rbac.yaml | 74 + .../rke2Etcd/templates/pushprox-clients.yaml | 135 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../rke2Etcd/templates/pushprox-proxy.yaml | 52 + .../templates/pushprox-servicemonitor.yaml | 39 + .../14.5.100/charts/rke2Etcd/values.yaml | 86 + .../14.5.100/charts/rke2Proxy/.helmignore | 23 + .../14.5.100/charts/rke2Proxy/Chart.yaml | 13 + .../14.5.100/charts/rke2Proxy/README.md | 54 + .../charts/rke2Proxy/templates/_helpers.tpl | 87 + .../templates/pushprox-clients-rbac.yaml | 74 + .../rke2Proxy/templates/pushprox-clients.yaml | 135 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../rke2Proxy/templates/pushprox-proxy.yaml | 52 + .../templates/pushprox-servicemonitor.yaml | 39 + .../14.5.100/charts/rke2Proxy/values.yaml | 86 + .../14.5.100/charts/rke2Scheduler/.helmignore | 23 + 
.../14.5.100/charts/rke2Scheduler/Chart.yaml | 13 + .../14.5.100/charts/rke2Scheduler/README.md | 54 + .../rke2Scheduler/templates/_helpers.tpl | 87 + .../templates/pushprox-clients-rbac.yaml | 74 + .../templates/pushprox-clients.yaml | 135 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../templates/pushprox-proxy.yaml | 52 + .../templates/pushprox-servicemonitor.yaml | 39 + .../14.5.100/charts/rke2Scheduler/values.yaml | 86 + .../charts/rkeControllerManager/.helmignore | 23 + .../charts/rkeControllerManager/Chart.yaml | 13 + .../charts/rkeControllerManager/README.md | 54 + .../templates/_helpers.tpl | 87 + .../templates/pushprox-clients-rbac.yaml | 74 + .../templates/pushprox-clients.yaml | 135 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../templates/pushprox-proxy.yaml | 52 + .../templates/pushprox-servicemonitor.yaml | 39 + .../charts/rkeControllerManager/values.yaml | 86 + .../14.5.100/charts/rkeEtcd/.helmignore | 23 + .../14.5.100/charts/rkeEtcd/Chart.yaml | 13 + .../14.5.100/charts/rkeEtcd/README.md | 54 + .../charts/rkeEtcd/templates/_helpers.tpl | 87 + .../templates/pushprox-clients-rbac.yaml | 74 + .../rkeEtcd/templates/pushprox-clients.yaml | 135 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../rkeEtcd/templates/pushprox-proxy.yaml | 52 + .../templates/pushprox-servicemonitor.yaml | 39 + .../14.5.100/charts/rkeEtcd/values.yaml | 86 + .../14.5.100/charts/rkeProxy/.helmignore | 23 + .../14.5.100/charts/rkeProxy/Chart.yaml | 13 + .../14.5.100/charts/rkeProxy/README.md | 54 + .../charts/rkeProxy/templates/_helpers.tpl | 87 + .../templates/pushprox-clients-rbac.yaml | 74 + .../rkeProxy/templates/pushprox-clients.yaml | 135 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../rkeProxy/templates/pushprox-proxy.yaml | 52 + .../templates/pushprox-servicemonitor.yaml | 39 + .../14.5.100/charts/rkeProxy/values.yaml | 86 + .../14.5.100/charts/rkeScheduler/.helmignore | 23 + .../14.5.100/charts/rkeScheduler/Chart.yaml | 13 + 
.../14.5.100/charts/rkeScheduler/README.md | 54 + .../rkeScheduler/templates/_helpers.tpl | 87 + .../templates/pushprox-clients-rbac.yaml | 74 + .../templates/pushprox-clients.yaml | 135 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../templates/pushprox-proxy.yaml | 52 + .../templates/pushprox-servicemonitor.yaml | 39 + .../14.5.100/charts/rkeScheduler/values.yaml | 86 + .../charts/windowsExporter/.helmignore | 23 + .../charts/windowsExporter/Chart.yaml | 15 + .../14.5.100/charts/windowsExporter/README.md | 17 + .../scripts/check-wins-version.ps1 | 20 + .../windowsExporter/scripts/proxy-entry.ps1 | 11 + .../charts/windowsExporter/scripts/run.ps1 | 78 + .../windowsExporter/templates/_helpers.tpl | 73 + .../windowsExporter/templates/configmap.yaml | 10 + .../windowsExporter/templates/daemonset.yaml | 77 + .../windowsExporter/templates/rbac.yaml | 78 + .../windowsExporter/templates/service.yaml | 15 + .../templates/servicemonitor.yaml | 44 + .../charts/windowsExporter/values.yaml | 46 + .../14.5.100/files/ingress-nginx/nginx.json | 1463 ++++ .../request-handling-performance.json | 981 +++ .../cluster/rancher-cluster-nodes.json | 776 ++ .../rancher/cluster/rancher-cluster.json | 759 ++ .../rancher/home/rancher-default-home.json | 1273 +++ .../files/rancher/k8s/rancher-etcd-nodes.json | 670 ++ .../files/rancher/k8s/rancher-etcd.json | 652 ++ .../k8s/rancher-k8s-components-nodes.json | 510 ++ .../rancher/k8s/rancher-k8s-components.json | 502 ++ .../rancher/nodes/rancher-node-detail.json | 789 ++ .../files/rancher/nodes/rancher-node.json | 776 ++ .../rancher/pods/rancher-pod-containers.json | 620 ++ .../files/rancher/pods/rancher-pod.json | 620 ++ .../workloads/rancher-workload-pods.json | 636 ++ .../rancher/workloads/rancher-workload.json | 636 ++ .../14.5.100/templates/NOTES.txt | 4 + .../14.5.100/templates/_helpers.tpl | 200 + .../templates/alertmanager/alertmanager.yaml | 147 + .../templates/alertmanager/cleanupSecret.yaml | 88 + 
.../templates/alertmanager/ingress.yaml | 65 + .../alertmanager/ingressperreplica.yaml | 62 + .../alertmanager/podDisruptionBudget.yaml | 21 + .../templates/alertmanager/psp-role.yaml | 21 + .../alertmanager/psp-rolebinding.yaml | 18 + .../14.5.100/templates/alertmanager/psp.yaml | 52 + .../templates/alertmanager/secret.yaml | 166 + .../templates/alertmanager/service.yaml | 50 + .../alertmanager/serviceaccount.yaml | 16 + .../alertmanager/servicemonitor.yaml | 42 + .../alertmanager/serviceperreplica.yaml | 46 + .../templates/exporters/core-dns/service.yaml | 24 + .../exporters/core-dns/servicemonitor.yaml | 33 + .../kube-api-server/servicemonitor.yaml | 36 + .../kube-controller-manager/endpoints.yaml | 20 + .../kube-controller-manager/service.yaml | 27 + .../servicemonitor.yaml | 44 + .../templates/exporters/kube-dns/service.yaml | 28 + .../exporters/kube-dns/servicemonitor.yaml | 46 + .../exporters/kube-etcd/endpoints.yaml | 20 + .../exporters/kube-etcd/service.yaml | 27 + .../exporters/kube-etcd/servicemonitor.yaml | 50 + .../exporters/kube-proxy/endpoints.yaml | 20 + .../exporters/kube-proxy/service.yaml | 27 + .../exporters/kube-proxy/servicemonitor.yaml | 38 + .../exporters/kube-scheduler/endpoints.yaml | 20 + .../exporters/kube-scheduler/service.yaml | 27 + .../kube-scheduler/servicemonitor.yaml | 44 + .../kube-state-metrics/serviceMonitor.yaml | 34 + .../exporters/kubelet/servicemonitor.yaml | 151 + .../node-exporter/servicemonitor.yaml | 32 + .../grafana/configmap-dashboards.yaml | 24 + .../grafana/configmaps-datasources.yaml | 43 + .../grafana/dashboards-1.14/apiserver.yaml | 1747 ++++ .../dashboards-1.14/cluster-total.yaml | 1882 +++++ .../dashboards-1.14/controller-manager.yaml | 1153 +++ .../grafana/dashboards-1.14/etcd.yaml | 1118 +++ .../grafana/dashboards-1.14/k8s-coredns.yaml | 1529 ++++ .../k8s-resources-cluster.yaml | 2582 ++++++ .../k8s-resources-namespace.yaml | 2286 ++++++ .../dashboards-1.14/k8s-resources-node.yaml | 978 +++ 
.../dashboards-1.14/k8s-resources-pod.yaml | 1772 ++++ .../k8s-resources-workload.yaml | 2034 +++++ .../k8s-resources-workloads-namespace.yaml | 2195 +++++ .../grafana/dashboards-1.14/kubelet.yaml | 2533 ++++++ .../dashboards-1.14/namespace-by-pod.yaml | 1464 ++++ .../namespace-by-workload.yaml | 1736 ++++ .../node-cluster-rsrc-use.yaml | 964 +++ .../dashboards-1.14/node-rsrc-use.yaml | 991 +++ .../grafana/dashboards-1.14/nodes.yaml | 997 +++ .../persistentvolumesusage.yaml | 577 ++ .../grafana/dashboards-1.14/pod-total.yaml | 1228 +++ .../prometheus-remote-write.yaml | 1670 ++++ .../grafana/dashboards-1.14/prometheus.yaml | 1227 +++ .../grafana/dashboards-1.14/proxy.yaml | 1232 +++ .../grafana/dashboards-1.14/scheduler.yaml | 1076 +++ .../grafana/dashboards-1.14/statefulset.yaml | 928 +++ .../dashboards-1.14/workload-total.yaml | 1438 ++++ .../templates/grafana/dashboards/etcd.yaml | 1118 +++ .../dashboards/k8s-cluster-rsrc-use.yaml | 959 +++ .../grafana/dashboards/k8s-node-rsrc-use.yaml | 986 +++ .../dashboards/k8s-resources-cluster.yaml | 1479 ++++ .../dashboards/k8s-resources-namespace.yaml | 963 +++ .../grafana/dashboards/k8s-resources-pod.yaml | 1006 +++ .../dashboards/k8s-resources-workload.yaml | 936 +++ .../k8s-resources-workloads-namespace.yaml | 972 +++ .../templates/grafana/dashboards/nodes.yaml | 1383 ++++ .../dashboards/persistentvolumesusage.yaml | 573 ++ .../templates/grafana/dashboards/pods.yaml | 680 ++ .../grafana/dashboards/statefulset.yaml | 926 +++ .../templates/grafana/namespaces.yaml | 13 + .../templates/grafana/servicemonitor.yaml | 32 + .../job-patch/clusterrole.yaml | 33 + .../job-patch/clusterrolebinding.yaml | 20 + .../job-patch/job-createSecret.yaml | 65 + .../job-patch/job-patchWebhook.yaml | 66 + .../admission-webhooks/job-patch/psp.yaml | 54 + .../admission-webhooks/job-patch/role.yaml | 21 + .../job-patch/rolebinding.yaml | 21 + .../job-patch/serviceaccount.yaml | 15 + .../mutatingWebhookConfiguration.yaml | 41 + 
.../validatingWebhookConfiguration.yaml | 41 + .../prometheus-operator/certmanager.yaml | 57 + .../prometheus-operator/clusterrole.yaml | 80 + .../clusterrolebinding.yaml | 17 + .../prometheus-operator/deployment.yaml | 145 + .../prometheus-operator/psp-clusterrole.yaml | 20 + .../psp-clusterrolebinding.yaml | 17 + .../templates/prometheus-operator/psp.yaml | 51 + .../prometheus-operator/service.yaml | 55 + .../prometheus-operator/serviceaccount.yaml | 12 + .../prometheus-operator/servicemonitor.yaml | 44 + .../14.5.100/templates/prometheus/_rules.tpl | 38 + .../additionalAlertRelabelConfigs.yaml | 16 + .../additionalAlertmanagerConfigs.yaml | 16 + .../prometheus/additionalPrometheusRules.yaml | 40 + .../prometheus/additionalScrapeConfigs.yaml | 16 + .../templates/prometheus/clusterrole.yaml | 30 + .../prometheus/clusterrolebinding.yaml | 18 + .../templates/prometheus/ingress.yaml | 65 + .../prometheus/ingressThanosSidecar.yaml | 64 + .../prometheus/ingressperreplica.yaml | 62 + .../templates/prometheus/nginx-config.yaml | 66 + .../prometheus/podDisruptionBudget.yaml | 21 + .../templates/prometheus/podmonitors.yaml | 37 + .../templates/prometheus/prometheus.yaml | 319 + .../templates/prometheus/psp-clusterrole.yaml | 20 + .../prometheus/psp-clusterrolebinding.yaml | 18 + .../14.5.100/templates/prometheus/psp.yaml | 62 + .../rules-1.14/alertmanager.rules.yaml | 70 + .../templates/prometheus/rules-1.14/etcd.yaml | 181 + .../prometheus/rules-1.14/general.rules.yaml | 56 + .../prometheus/rules-1.14/k8s.rules.yaml | 117 + .../kube-apiserver-availability.rules.yaml | 160 + .../rules-1.14/kube-apiserver-slos.yaml | 95 + .../rules-1.14/kube-apiserver.rules.yaml | 358 + .../kube-prometheus-general.rules.yaml | 31 + .../kube-prometheus-node-recording.rules.yaml | 39 + .../rules-1.14/kube-scheduler.rules.yaml | 65 + .../rules-1.14/kube-state-metrics.yaml | 59 + .../prometheus/rules-1.14/kubelet.rules.yaml | 39 + .../rules-1.14/kubernetes-apps.yaml | 298 + 
.../rules-1.14/kubernetes-resources.yaml | 159 + .../rules-1.14/kubernetes-storage.yaml | 75 + .../kubernetes-system-apiserver.yaml | 98 + .../kubernetes-system-controller-manager.yaml | 43 + .../rules-1.14/kubernetes-system-kubelet.yaml | 188 + .../kubernetes-system-scheduler.yaml | 43 + .../rules-1.14/kubernetes-system.yaml | 55 + .../rules-1.14/node-exporter.rules.yaml | 79 + .../prometheus/rules-1.14/node-exporter.yaml | 262 + .../prometheus/rules-1.14/node-network.yaml | 37 + .../prometheus/rules-1.14/node.rules.yaml | 51 + .../rules-1.14/prometheus-operator.yaml | 113 + .../prometheus/rules-1.14/prometheus.yaml | 258 + .../prometheus/rules/alertmanager.rules.yaml | 63 + .../templates/prometheus/rules/etcd.yaml | 181 + .../prometheus/rules/general.rules.yaml | 56 + .../templates/prometheus/rules/k8s.rules.yaml | 83 + .../rules/kube-apiserver.rules.yaml | 39 + .../kube-prometheus-node-alerting.rules.yaml | 47 + .../kube-prometheus-node-recording.rules.yaml | 41 + .../rules/kube-scheduler.rules.yaml | 65 + .../prometheus/rules/kubernetes-absent.yaml | 159 + .../prometheus/rules/kubernetes-apps.yaml | 200 + .../rules/kubernetes-resources.yaml | 121 + .../prometheus/rules/kubernetes-storage.yaml | 72 + .../prometheus/rules/kubernetes-system.yaml | 184 + .../prometheus/rules/node-network.yaml | 57 + .../templates/prometheus/rules/node-time.yaml | 37 + .../prometheus/rules/node.rules.yaml | 202 + .../prometheus/rules/prometheus-operator.yaml | 49 + .../prometheus/rules/prometheus.rules.yaml | 139 + .../templates/prometheus/service.yaml | 60 + .../prometheus/serviceThanosSidecar.yaml | 30 + .../templates/prometheus/serviceaccount.yaml | 16 + .../templates/prometheus/servicemonitor.yaml | 42 + .../templates/prometheus/servicemonitors.yaml | 38 + .../prometheus/serviceperreplica.yaml | 46 + .../rancher-monitoring/clusterrole.yaml | 131 + .../rancher-monitoring/config-role.yaml | 48 + .../rancher-monitoring/dashboard-role.yaml | 47 + 
.../addons/ingress-nginx-dashboard.yaml | 18 + .../rancher/cluster-dashboards.yaml | 17 + .../dashboards/rancher/default-dashboard.yaml | 17 + .../dashboards/rancher/k8s-dashboards.yaml | 17 + .../dashboards/rancher/nodes-dashboards.yaml | 17 + .../dashboards/rancher/pods-dashboards.yaml | 17 + .../rancher/workload-dashboards.yaml | 17 + .../exporters/ingress-nginx/service.yaml | 24 + .../ingress-nginx/servicemonitor.yaml | 33 + .../rancher-monitoring/hardened.yaml | 124 + .../templates/validate-install-crd.yaml | 21 + .../rancher-monitoring/14.5.100/values.yaml | 2956 +++++++ .../1.16.201/.helmignore | 21 + .../rancher-node-exporter/1.16.201/Chart.yaml | 23 + .../rancher-node-exporter/1.16.201/OWNERS | 6 + .../rancher-node-exporter/1.16.201/README.md | 63 + .../1.16.201/ci/port-values.yaml | 3 + .../1.16.201/templates/NOTES.txt | 15 + .../1.16.201/templates/_helpers.tpl | 95 + .../1.16.201/templates/daemonset.yaml | 183 + .../1.16.201/templates/endpoints.yaml | 18 + .../1.16.201/templates/monitor.yaml | 32 + .../1.16.201/templates/psp-clusterrole.yaml | 15 + .../templates/psp-clusterrolebinding.yaml | 17 + .../1.16.201/templates/psp.yaml | 52 + .../1.16.201/templates/service.yaml | 23 + .../1.16.201/templates/serviceaccount.yaml | 18 + .../1.16.201/values.yaml | 177 + .../rancher-operator-crd/0.1.400/Chart.yaml | 11 + .../0.1.400/templates/crds.yaml | 3304 ++++++++ .../rancher-operator/0.1.400/Chart.yaml | 14 + .../0.1.400/templates/_helpers.tpl | 7 + .../0.1.400/templates/deployment.yaml | 23 + .../0.1.400/templates/rbac.yaml | 44 + .../0.1.400/templates/serviceaccount.yaml | 4 + .../rancher-operator/0.1.400/values.yaml | 8 + .../rancher-prom2teams/0.2.000/.helmignore | 22 + .../rancher-prom2teams/0.2.000/Chart.yaml | 10 + .../rancher-prom2teams/0.2.000/files/teams.j2 | 44 + .../0.2.000/templates/NOTES.txt | 2 + .../0.2.000/templates/_helpers.tpl | 73 + .../0.2.000/templates/configmap.yaml | 39 + .../0.2.000/templates/deployment.yaml | 77 + 
.../0.2.000/templates/psp.yaml | 28 + .../0.2.000/templates/role.yaml | 15 + .../0.2.000/templates/rolebinding.yaml | 13 + .../0.2.000/templates/service-account.yaml | 6 + .../0.2.000/templates/service.yaml | 17 + .../rancher-prom2teams/0.2.000/values.yaml | 62 + .../2.12.101/.helmignore | 21 + .../2.12.101/Chart.yaml | 26 + .../2.12.101/README.md | 147 + .../2.12.101/ci/default-values.yaml | 0 .../2.12.101/ci/external-rules-values.yaml | 9 + .../2.12.101/templates/NOTES.txt | 9 + .../2.12.101/templates/_helpers.tpl | 72 + .../2.12.101/templates/certmanager.yaml | 48 + .../cluster-role-binding-auth-delegator.yaml | 19 + .../cluster-role-binding-resource-reader.yaml | 19 + .../cluster-role-resource-reader.yaml | 23 + .../2.12.101/templates/configmap.yaml | 96 + .../templates/custom-metrics-apiservice.yaml | 32 + ...stom-metrics-cluster-role-binding-hpa.yaml | 23 + .../custom-metrics-cluster-role.yaml | 16 + .../2.12.101/templates/deployment.yaml | 135 + .../external-metrics-apiservice.yaml | 32 + ...rnal-metrics-cluster-role-binding-hpa.yaml | 19 + .../external-metrics-cluster-role.yaml | 20 + .../2.12.101/templates/pdb.yaml | 22 + .../2.12.101/templates/psp.yaml | 68 + .../resource-metrics-apiservice.yaml | 32 + ...resource-metrics-cluster-role-binding.yaml | 19 + .../resource-metrics-cluster-role.yaml | 22 + .../templates/role-binding-auth-reader.yaml | 20 + .../2.12.101/templates/secret.yaml | 15 + .../2.12.101/templates/service.yaml | 22 + .../2.12.101/templates/serviceaccount.yaml | 12 + .../2.12.101/values.yaml | 180 + .../rancher-pushprox/0.1.300/.helmignore | 23 + .../rancher-pushprox/0.1.300/Chart.yaml | 13 + .../rancher-pushprox/0.1.300/README.md | 54 + .../0.1.300/templates/_helpers.tpl | 87 + .../templates/pushprox-clients-rbac.yaml | 74 + .../0.1.300/templates/pushprox-clients.yaml | 135 + .../templates/pushprox-proxy-rbac.yaml | 63 + .../0.1.300/templates/pushprox-proxy.yaml | 52 + .../templates/pushprox-servicemonitor.yaml | 39 + 
.../rancher-pushprox/0.1.300/values.yaml | 86 + .../rancher-sachet/1.0.100/.helmignore | 23 + .../rancher-sachet/1.0.100/Chart.yaml | 11 + .../1.0.100/files/template.tmpl | 1 + .../1.0.100/templates/NOTES.txt | 3 + .../1.0.100/templates/_helpers.tpl | 79 + .../templates/configmap-pre-install.yaml | 34 + .../1.0.100/templates/deployment.yaml | 75 + .../rancher-sachet/1.0.100/templates/psp.yaml | 28 + .../1.0.100/templates/role.yaml | 15 + .../1.0.100/templates/rolebinding.yaml | 13 + .../1.0.100/templates/service-account.yaml | 6 + .../1.0.100/templates/service.yaml | 17 + .../rancher-sachet/1.0.100/values.yaml | 63 + .../rancher-tracing/1.20.100/.helmignore | 23 + .../rancher-tracing/1.20.100/Chart.yaml | 12 + .../rancher-tracing/1.20.100/README.md | 5 + .../1.20.100/templates/_affinity.tpl | 92 + .../1.20.100/templates/_helpers.tpl | 32 + .../1.20.100/templates/deployment.yaml | 86 + .../1.20.100/templates/psp.yaml | 86 + .../1.20.100/templates/pvc.yaml | 16 + .../1.20.100/templates/service.yaml | 63 + .../rancher-tracing/1.20.100/values.yaml | 44 + .../rancher-webhook/0.1.000/Chart.yaml | 11 + .../0.1.000/templates/_helpers.tpl | 7 + .../0.1.000/templates/deployment.yaml | 26 + .../0.1.000/templates/rbac.yaml | 12 + .../0.1.000/templates/service.yaml | 13 + .../0.1.000/templates/serviceaccount.yaml | 4 + .../0.1.000/templates/webhook.yaml | 19 + .../rancher-webhook/0.1.000/values.yaml | 8 + .../0.1.000/.helmignore | 23 + .../0.1.000/Chart.yaml | 15 + .../0.1.000/README.md | 17 + .../0.1.000/scripts/check-wins-version.ps1 | 20 + .../0.1.000/scripts/proxy-entry.ps1 | 11 + .../0.1.000/scripts/run.ps1 | 78 + .../0.1.000/templates/_helpers.tpl | 73 + .../0.1.000/templates/configmap.yaml | 10 + .../0.1.000/templates/daemonset.yaml | 77 + .../0.1.000/templates/rbac.yaml | 78 + .../0.1.000/templates/service.yaml | 15 + .../0.1.000/templates/servicemonitor.yaml | 44 + .../0.1.000/values.yaml | 46 + .../rancher-wins-upgrader/0.0.100/.helmignore | 23 + 
.../rancher-wins-upgrader/0.0.100/Chart.yaml | 15 + .../rancher-wins-upgrader/0.0.100/README.md | 41 + .../0.0.100/app-readme.md | 19 + .../0.0.100/scripts/noop.ps1 | 4 + .../0.0.100/scripts/upgrade.ps1 | 72 + .../0.0.100/templates/_helpers.tpl | 63 + .../0.0.100/templates/configmap.yaml | 17 + .../0.0.100/templates/daemonset.yaml | 72 + .../0.0.100/templates/rbac.yaml | 70 + .../rancher-wins-upgrader/0.0.100/values.yaml | 60 + index.yaml | 845 ++ 1246 files changed, 164408 insertions(+) create mode 100755 assets/fleet-agent/fleet-agent-0.3.500.tgz create mode 100755 assets/fleet-crd/fleet-crd-0.3.500.tgz create mode 100755 assets/fleet/fleet-0.3.500.tgz create mode 100755 assets/longhorn/longhorn-1.1.100.tgz create mode 100755 assets/longhorn/longhorn-crd-1.1.100.tgz create mode 100755 assets/rancher-alerting-drivers/rancher-alerting-drivers-1.0.100.tgz create mode 100755 assets/rancher-backup-crd/rancher-backup-crd-1.0.400.tgz create mode 100755 assets/rancher-backup/rancher-backup-1.0.400.tgz create mode 100755 assets/rancher-cis-benchmark/rancher-cis-benchmark-1.0.400.tgz create mode 100755 assets/rancher-cis-benchmark/rancher-cis-benchmark-crd-1.0.400.tgz create mode 100755 assets/rancher-gatekeeper/rancher-gatekeeper-3.3.001.tgz create mode 100755 assets/rancher-gatekeeper/rancher-gatekeeper-crd-3.3.001.tgz create mode 100755 assets/rancher-grafana/rancher-grafana-6.6.401.tgz create mode 100755 assets/rancher-istio-1.8/rancher-istio-1.8.500.tgz create mode 100755 assets/rancher-istio-1.8/rancher-istio-1.8.600.tgz create mode 100755 assets/rancher-istio-1.9/rancher-istio-1.9.300.tgz create mode 100755 assets/rancher-istio-1.9/rancher-istio-1.9.500.tgz create mode 100755 assets/rancher-kiali-server/rancher-kiali-server-1.32.100.tgz create mode 100755 assets/rancher-kiali-server/rancher-kiali-server-crd-1.32.100.tgz create mode 100755 assets/rancher-kube-state-metrics/rancher-kube-state-metrics-2.13.101.tgz create mode 100755 
assets/rancher-logging/rancher-logging-3.9.400.tgz create mode 100755 assets/rancher-logging/rancher-logging-crd-3.9.400.tgz create mode 100755 assets/rancher-monitoring/rancher-monitoring-14.5.100.tgz create mode 100755 assets/rancher-monitoring/rancher-monitoring-crd-14.5.100.tgz create mode 100755 assets/rancher-node-exporter/rancher-node-exporter-1.16.201.tgz create mode 100755 assets/rancher-operator-crd/rancher-operator-crd-0.1.400.tgz create mode 100755 assets/rancher-operator/rancher-operator-0.1.400.tgz create mode 100755 assets/rancher-prom2teams/rancher-prom2teams-0.2.000.tgz create mode 100755 assets/rancher-prometheus-adapter/rancher-prometheus-adapter-2.12.101.tgz create mode 100755 assets/rancher-pushprox/rancher-pushprox-0.1.300.tgz create mode 100755 assets/rancher-sachet/rancher-sachet-1.0.100.tgz create mode 100755 assets/rancher-tracing/rancher-tracing-1.20.100.tgz create mode 100755 assets/rancher-webhook/rancher-webhook-0.1.000.tgz create mode 100755 assets/rancher-windows-exporter/rancher-windows-exporter-0.1.000.tgz create mode 100755 assets/rancher-wins-upgrader/rancher-wins-upgrader-0.0.100.tgz create mode 100755 charts/fleet-agent/fleet-agent/0.3.500/Chart.yaml create mode 100755 charts/fleet-agent/fleet-agent/0.3.500/templates/_helpers.tpl create mode 100755 charts/fleet-agent/fleet-agent/0.3.500/templates/configmap.yaml create mode 100755 charts/fleet-agent/fleet-agent/0.3.500/templates/deployment.yaml create mode 100755 charts/fleet-agent/fleet-agent/0.3.500/templates/network_policy_allow_all.yaml create mode 100755 charts/fleet-agent/fleet-agent/0.3.500/templates/patch_default_serviceaccount.yaml create mode 100755 charts/fleet-agent/fleet-agent/0.3.500/templates/rbac.yaml create mode 100755 charts/fleet-agent/fleet-agent/0.3.500/templates/secret.yaml create mode 100755 charts/fleet-agent/fleet-agent/0.3.500/templates/serviceaccount.yaml create mode 100755 charts/fleet-agent/fleet-agent/0.3.500/templates/validate.yaml create mode 
100755 charts/fleet-agent/fleet-agent/0.3.500/values.yaml create mode 100755 charts/fleet-crd/fleet-crd/0.3.500/Chart.yaml create mode 100755 charts/fleet-crd/fleet-crd/0.3.500/templates/crds.yaml create mode 100755 charts/fleet-crd/fleet-crd/0.3.500/templates/gitjobs-crds.yaml create mode 100755 charts/fleet/fleet/0.3.500/Chart.yaml create mode 100755 charts/fleet/fleet/0.3.500/charts/gitjob/.helmignore create mode 100755 charts/fleet/fleet/0.3.500/charts/gitjob/Chart.yaml create mode 100755 charts/fleet/fleet/0.3.500/charts/gitjob/templates/_helpers.tpl create mode 100755 charts/fleet/fleet/0.3.500/charts/gitjob/templates/clusterrole.yaml create mode 100755 charts/fleet/fleet/0.3.500/charts/gitjob/templates/clusterrolebinding.yaml create mode 100755 charts/fleet/fleet/0.3.500/charts/gitjob/templates/deployment.yaml create mode 100755 charts/fleet/fleet/0.3.500/charts/gitjob/templates/service.yaml create mode 100755 charts/fleet/fleet/0.3.500/charts/gitjob/templates/serviceaccount.yaml create mode 100755 charts/fleet/fleet/0.3.500/charts/gitjob/values.yaml create mode 100755 charts/fleet/fleet/0.3.500/templates/_helpers.tpl create mode 100755 charts/fleet/fleet/0.3.500/templates/configmap.yaml create mode 100755 charts/fleet/fleet/0.3.500/templates/deployment.yaml create mode 100755 charts/fleet/fleet/0.3.500/templates/rbac.yaml create mode 100755 charts/fleet/fleet/0.3.500/templates/serviceaccount.yaml create mode 100755 charts/fleet/fleet/0.3.500/values.yaml create mode 100755 charts/longhorn/longhorn-crd/1.1.100/Chart.yaml create mode 100755 charts/longhorn/longhorn-crd/1.1.100/README.md create mode 100755 charts/longhorn/longhorn-crd/1.1.100/templates/crds.yaml create mode 100755 charts/longhorn/longhorn/1.1.100/.helmignore create mode 100755 charts/longhorn/longhorn/1.1.100/Chart.yaml create mode 100755 charts/longhorn/longhorn/1.1.100/README.md create mode 100755 charts/longhorn/longhorn/1.1.100/app-readme.md create mode 100755 
charts/longhorn/longhorn/1.1.100/questions.yml create mode 100755 charts/longhorn/longhorn/1.1.100/templates/NOTES.txt create mode 100755 charts/longhorn/longhorn/1.1.100/templates/_helpers.tpl create mode 100755 charts/longhorn/longhorn/1.1.100/templates/clusterrole.yaml create mode 100755 charts/longhorn/longhorn/1.1.100/templates/clusterrolebinding.yaml create mode 100755 charts/longhorn/longhorn/1.1.100/templates/daemonset-sa.yaml create mode 100755 charts/longhorn/longhorn/1.1.100/templates/default-setting.yaml create mode 100755 charts/longhorn/longhorn/1.1.100/templates/deployment-driver.yaml create mode 100755 charts/longhorn/longhorn/1.1.100/templates/deployment-ui.yaml create mode 100755 charts/longhorn/longhorn/1.1.100/templates/ingress.yaml create mode 100755 charts/longhorn/longhorn/1.1.100/templates/postupgrade-job.yaml create mode 100755 charts/longhorn/longhorn/1.1.100/templates/psp.yaml create mode 100755 charts/longhorn/longhorn/1.1.100/templates/registry-secret.yml create mode 100755 charts/longhorn/longhorn/1.1.100/templates/serviceaccount.yaml create mode 100755 charts/longhorn/longhorn/1.1.100/templates/storageclass.yaml create mode 100755 charts/longhorn/longhorn/1.1.100/templates/tls-secrets.yaml create mode 100755 charts/longhorn/longhorn/1.1.100/templates/uninstall-job.yaml create mode 100755 charts/longhorn/longhorn/1.1.100/templates/userroles.yaml create mode 100755 charts/longhorn/longhorn/1.1.100/templates/validate-install-crd.yaml create mode 100755 charts/longhorn/longhorn/1.1.100/values.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/Chart.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/app-readme.md create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/.helmignore create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/Chart.yaml create mode 100755 
charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/files/teams.j2 create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/NOTES.txt create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/_helpers.tpl create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/configmap.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/deployment.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/psp.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/role.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/rolebinding.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/service-account.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/service.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/values.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/.helmignore create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/Chart.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/files/template.tmpl create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/NOTES.txt create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/_helpers.tpl create mode 100755 
charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/configmap-pre-install.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/deployment.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/psp.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/role.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/rolebinding.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/service-account.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/service.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/values.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/questions.yml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/templates/NOTES.txt create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/templates/_helpers.tpl create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/templates/cluster-role.yaml create mode 100755 charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/values.yaml create mode 100755 charts/rancher-backup-crd/rancher-backup-crd/1.0.400/Chart.yaml create mode 100755 charts/rancher-backup-crd/rancher-backup-crd/1.0.400/README.md create mode 100755 charts/rancher-backup-crd/rancher-backup-crd/1.0.400/templates/backup.yaml create mode 100755 charts/rancher-backup-crd/rancher-backup-crd/1.0.400/templates/resourceset.yaml create mode 100755 charts/rancher-backup-crd/rancher-backup-crd/1.0.400/templates/restore.yaml create mode 100755 
charts/rancher-backup/rancher-backup/1.0.400/Chart.yaml create mode 100755 charts/rancher-backup/rancher-backup/1.0.400/README.md create mode 100755 charts/rancher-backup/rancher-backup/1.0.400/app-readme.md create mode 100755 charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/eks.yaml create mode 100755 charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/fleet.yaml create mode 100755 charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/gke.yaml create mode 100755 charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/rancher-operator.yaml create mode 100755 charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/rancher.yaml create mode 100755 charts/rancher-backup/rancher-backup/1.0.400/templates/_helpers.tpl create mode 100755 charts/rancher-backup/rancher-backup/1.0.400/templates/clusterrolebinding.yaml create mode 100755 charts/rancher-backup/rancher-backup/1.0.400/templates/deployment.yaml create mode 100755 charts/rancher-backup/rancher-backup/1.0.400/templates/pvc.yaml create mode 100755 charts/rancher-backup/rancher-backup/1.0.400/templates/rancher-resourceset.yaml create mode 100755 charts/rancher-backup/rancher-backup/1.0.400/templates/s3-secret.yaml create mode 100755 charts/rancher-backup/rancher-backup/1.0.400/templates/serviceaccount.yaml create mode 100755 charts/rancher-backup/rancher-backup/1.0.400/templates/validate-install-crd.yaml create mode 100755 charts/rancher-backup/rancher-backup/1.0.400/values.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/Chart.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/README.md create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/templates/clusterscan.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/templates/clusterscanbenchmark.yaml 
create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/templates/clusterscanprofile.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/templates/clusterscanreport.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/Chart.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/README.md create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/app-readme.md create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/_helpers.tpl create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/alertingrule.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-cis-1.5.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-cis-1.6.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-eks-1.0.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-gke-1.0.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-k3s-cis-1.6-hardened.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-k3s-cis-1.6-permissive.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke-cis-1.5-hardened.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke-cis-1.5-permissive.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke-cis-1.6-hardened.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke-cis-1.6-permissive.yaml create mode 100755 
charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke2-cis-1.5-hardened.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke2-cis-1.5-permissive.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke2-cis-1.6-hardened.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke2-cis-1.6-permissive.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/cis-roles.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/configmap.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/deployment.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/network_policy_allow_all.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/patch_default_serviceaccount.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/rbac.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-cis-1.5.yml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-cis-1.6.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-k3s-cis-1.6-hardened.yml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-k3s-cis-1.6-permissive.yml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke-1.5-hardened.yml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke-1.5-permissive.yml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke-1.6-hardened.yaml create mode 100755 
charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke-1.6-permissive.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke2-cis-1.5-hardened.yml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke2-cis-1.5-permissive.yml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke2-cis-1.6-hardened.yml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke2-cis-1.6-permissive.yml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofileeks.yml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofilegke.yml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/serviceaccount.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/validate-install-crd.yaml create mode 100755 charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/values.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/Chart.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/README.md create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/crd-manifest/config-customresourcedefinition.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/crd-manifest/constraintpodstatus-customresourcedefinition.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/crd-manifest/constrainttemplate-customresourcedefinition.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/crd-manifest/constrainttemplatepodstatus-customresourcedefinition.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/templates/_helpers.tpl create mode 100755 
charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/templates/jobs.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/templates/manifest.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/templates/rbac.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/values.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/.helmignore create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/CHANGELOG.md create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/Chart.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/README.md create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/app-readme.md create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/_helpers.tpl create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/allowedrepos.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-admin-podsecuritypolicy.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-admin-serviceaccount.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-audit-deployment.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-controller-manager-deployment.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-manager-role-clusterrole.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-manager-role-role.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-manager-rolebinding-clusterrolebinding.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-manager-rolebinding-rolebinding.yaml create mode 100755 
charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-validating-webhook-configuration-validatingwebhookconfiguration.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-webhook-server-cert-secret.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-webhook-service-service.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/requiredlabels.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/validate-install-crd.yaml create mode 100755 charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/values.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/.helmignore create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/Chart.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/README.md create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/dashboards/custom-dashboard.json create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/NOTES.txt create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/_helpers.tpl create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/_pod.tpl create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/clusterrole.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/clusterrolebinding.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/configmap-dashboard-provider.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/configmap.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/dashboards-json-configmap.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/deployment.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/headless-service.yaml create mode 100755 
charts/rancher-grafana/rancher-grafana/6.6.401/templates/image-renderer-deployment.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/image-renderer-network-policy.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/image-renderer-service.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/ingress.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/nginx-config.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/poddisruptionbudget.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/podsecuritypolicy.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/pvc.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/role.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/rolebinding.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/secret-env.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/secret.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/service.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/serviceaccount.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/servicemonitor.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/statefulset.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-configmap.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-podsecuritypolicy.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-role.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-rolebinding.yaml create mode 100755 
charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-serviceaccount.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test.yaml create mode 100755 charts/rancher-grafana/rancher-grafana/6.6.401/values.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/Chart.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/README.md create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/app-readme.md create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/Chart.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/NOTES.txt create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/_helpers.tpl create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/cabundle.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/configmap.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/envoy.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/go.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/kiali.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/micrometer-1.1-jvm.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/microprofile-1.1.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/microprofile-x.y.yaml create mode 100755 
charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/nodejs.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/quarkus.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/springboot-jvm-pool.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/springboot-jvm.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/springboot-tomcat.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/thorntail.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/tomcat.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-client.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-eventbus.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-jvm.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-pool.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-server.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/deployment.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/hpa.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/ingress.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/oauth.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/psp.yaml create mode 100755 
charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/role-controlplane.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/role-viewer.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/role.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/rolebinding-controlplane.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/rolebinding.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/route.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/service.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/serviceaccount.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/validate-install-crd.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/web-root-configmap.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/values.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/.helmignore create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/Chart.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/README.md create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/_affinity.tpl create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/_helpers.tpl create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/deployment.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/psp.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/pvc.yaml create mode 100755 
charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/service.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/values.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/configs/istio-base.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/requirements.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/samples/overlay-example.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/_helpers.tpl create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/admin-role.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/base-config-map.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/clusterrole.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/clusterrolebinding.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/edit-role.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-cni-psp.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-install-job.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-install-psp.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-psp.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-uninstall-job.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/overlay-config-map.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/service-monitors.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/serviceaccount.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/view-role.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.500/values.yaml create mode 100755 
charts/rancher-istio-1.8/rancher-istio/1.8.600/Chart.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/README.md create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/app-readme.md create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/Chart.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/NOTES.txt create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/_helpers.tpl create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/cabundle.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/configmap.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/envoy.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/go.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/kiali.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/micrometer-1.1-jvm.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/microprofile-1.1.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/microprofile-x.y.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/nodejs.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/quarkus.yaml create mode 100755 
charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/springboot-jvm-pool.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/springboot-jvm.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/springboot-tomcat.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/thorntail.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/tomcat.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-client.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-eventbus.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-jvm.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-pool.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-server.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/deployment.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/hpa.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/ingress.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/oauth.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/psp.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/role-controlplane.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/role-viewer.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/role.yaml create mode 
100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/rolebinding-controlplane.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/rolebinding.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/route.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/service.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/serviceaccount.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/validate-install-crd.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/web-root-configmap.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/values.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/.helmignore create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/Chart.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/README.md create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/_affinity.tpl create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/_helpers.tpl create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/deployment.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/psp.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/pvc.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/service.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/values.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/configs/istio-base.yaml create mode 100755 
charts/rancher-istio-1.8/rancher-istio/1.8.600/requirements.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/samples/overlay-example.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/_helpers.tpl create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/admin-role.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/base-config-map.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/clusterrole.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/clusterrolebinding.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/edit-role.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-cni-psp.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-install-job.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-install-psp.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-psp.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-uninstall-job.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/overlay-config-map.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/service-monitors.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/serviceaccount.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/view-role.yaml create mode 100755 charts/rancher-istio-1.8/rancher-istio/1.8.600/values.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/Chart.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/README.md create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/app-readme.md create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/Chart.yaml create 
mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/NOTES.txt create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/_helpers.tpl create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/cabundle.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/configmap.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/envoy.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/go.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/kiali.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/micrometer-1.1-jvm.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/microprofile-1.1.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/microprofile-x.y.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/nodejs.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/quarkus.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/springboot-jvm-pool.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/springboot-jvm.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/springboot-tomcat.yaml create mode 100755 
charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/thorntail.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/tomcat.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-client.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-eventbus.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-jvm.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-pool.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-server.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/deployment.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/hpa.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/ingress.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/oauth.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/psp.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/role-controlplane.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/role-viewer.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/role.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/rolebinding-controlplane.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/rolebinding.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/route.yaml create mode 100755 
charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/service.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/serviceaccount.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/validate-install-crd.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/web-root-configmap.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/values.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/.helmignore create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/Chart.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/README.md create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/_affinity.tpl create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/_helpers.tpl create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/deployment.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/psp.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/pvc.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/service.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/values.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/configs/istio-base.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/requirements.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/samples/overlay-example.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/_helpers.tpl create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/admin-role.yaml create mode 100755 
charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/base-config-map.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/clusterrole.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/clusterrolebinding.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/edit-role.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-cni-psp.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-install-job.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-install-psp.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-psp.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-uninstall-job.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/overlay-config-map.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/service-monitors.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/serviceaccount.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/view-role.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.300/values.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/Chart.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/README.md create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/app-readme.md create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/Chart.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/NOTES.txt create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/_helpers.tpl create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/cabundle.yaml create mode 100755 
charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/configmap.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/envoy.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/go.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/kiali.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/micrometer-1.1-jvm.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/microprofile-1.1.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/microprofile-x.y.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/nodejs.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/quarkus.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/springboot-jvm-pool.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/springboot-jvm.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/springboot-tomcat.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/thorntail.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/tomcat.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-client.yaml create mode 100755 
charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-eventbus.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-jvm.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-pool.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-server.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/deployment.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/hpa.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/ingress.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/oauth.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/psp.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/role-controlplane.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/role-viewer.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/role.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/rolebinding-controlplane.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/rolebinding.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/route.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/service.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/serviceaccount.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/validate-install-crd.yaml create mode 100755 
charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/web-root-configmap.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/values.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/.helmignore create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/Chart.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/README.md create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/_affinity.tpl create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/_helpers.tpl create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/deployment.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/psp.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/pvc.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/service.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/values.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/configs/istio-base.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/requirements.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/samples/overlay-example.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/_helpers.tpl create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/admin-role.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/base-config-map.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/clusterrole.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/clusterrolebinding.yaml create mode 100755 
charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/edit-role.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-cni-psp.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-install-job.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-install-psp.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-psp.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-uninstall-job.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/overlay-config-map.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/service-monitors.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/serviceaccount.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/view-role.yaml create mode 100755 charts/rancher-istio-1.9/rancher-istio/1.9.500/values.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server-crd/1.32.100/Chart.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server-crd/1.32.100/README.md create mode 100755 charts/rancher-kiali-server/rancher-kiali-server-crd/1.32.100/templates/crds.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/Chart.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/NOTES.txt create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/_helpers.tpl create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/cabundle.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/configmap.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/envoy.yaml create mode 100755 
charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/go.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/kiali.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/micrometer-1.0.6-jvm.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/micrometer-1.1-jvm.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/microprofile-1.1.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/microprofile-x.y.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/nodejs.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/quarkus.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/springboot-jvm-pool.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/springboot-jvm.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/springboot-tomcat.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/thorntail.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/tomcat.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-client.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-eventbus.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-jvm.yaml create mode 100755 
charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-pool.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-server.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/deployment.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/hpa.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/ingress.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/oauth.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/psp.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/role-controlplane.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/role-viewer.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/role.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/rolebinding-controlplane.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/rolebinding.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/route.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/service.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/serviceaccount.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/validate-install-crd.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/web-root-configmap.yaml create mode 100755 charts/rancher-kiali-server/rancher-kiali-server/1.32.100/values.yaml create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/.helmignore create mode 100755 
charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/Chart.yaml create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/LICENSE create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/OWNERS create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/README.md create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/NOTES.txt create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/_helpers.tpl create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/clusterrolebinding.yaml create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/deployment.yaml create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/kubeconfig-secret.yaml create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/pdb.yaml create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/podsecuritypolicy.yaml create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/psp-clusterrole.yaml create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/psp-clusterrolebinding.yaml create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/role.yaml create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/rolebinding.yaml create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/service.yaml create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/serviceaccount.yaml create mode 100755 
charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/servicemonitor.yaml create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/stsdiscovery-role.yaml create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/stsdiscovery-rolebinding.yaml create mode 100755 charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/values.yaml create mode 100755 charts/rancher-logging/rancher-logging-crd/3.9.400/Chart.yaml create mode 100755 charts/rancher-logging/rancher-logging-crd/3.9.400/README.md create mode 100755 charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_clusterflows.yaml create mode 100755 charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_clusteroutputs.yaml create mode 100755 charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_flows.yaml create mode 100755 charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_loggings.yaml create mode 100755 charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_outputs.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/.helmignore create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/Chart.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/README.md create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/app-readme.md create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/NOTES.txt create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/_helpers.tpl create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/clusterrole.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/clusterrolebinding.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/crds.yaml create mode 100755 
charts/rancher-logging/rancher-logging/3.9.400/templates/deployment.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/aks/logging.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/eks/logging.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/gke/logging.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/k3s/logging-k3s-openrc.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/k3s/logging-k3s-systemd.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke/configmap.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke/daemonset.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke2/configmap.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke2/daemonset.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke2/logging-rke2-containers.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/root/logging.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/psp.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/service.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/serviceMonitor.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/serviceaccount.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/userroles.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/validate-install-crd.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/templates/validate-install.yaml create mode 100755 charts/rancher-logging/rancher-logging/3.9.400/values.yaml create mode 100755 
charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/Chart.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-alertmanagerconfigs.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-alertmanagers.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-podmonitors.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-probes.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-prometheuses.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-prometheusrules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-servicemonitors.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-thanosrulers.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/templates/_helpers.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/templates/jobs.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/templates/manifest.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/templates/rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/values.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/.helmignore create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/CHANGELOG.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/CONTRIBUTING.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/Chart.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/README.md create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/app-README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/.helmignore create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/Chart.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/dashboards/custom-dashboard.json create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/NOTES.txt create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/_helpers.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/_pod.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/clusterrole.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/clusterrolebinding.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/configmap-dashboard-provider.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/configmap.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/dashboards-json-configmap.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/deployment.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/headless-service.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/image-renderer-deployment.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/image-renderer-network-policy.yaml create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/image-renderer-service.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/ingress.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/nginx-config.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/poddisruptionbudget.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/podsecuritypolicy.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/pvc.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/role.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/rolebinding.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/secret-env.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/secret.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/service.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/serviceaccount.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/statefulset.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-configmap.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-podsecuritypolicy.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-role.yaml create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-rolebinding.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-serviceaccount.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/values.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/.helmignore create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/Chart.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/_helpers.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-clients-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-clients.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-proxy-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-proxy.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/values.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/.helmignore create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/Chart.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/LICENSE create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/NOTES.txt create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/_helpers.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/clusterrolebinding.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/deployment.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/kubeconfig-secret.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/pdb.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/podsecuritypolicy.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/psp-clusterrole.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/role.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/rolebinding.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/service.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/serviceaccount.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/stsdiscovery-role.yaml 
create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/values.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/.helmignore create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/Chart.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/_helpers.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-clients-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-clients.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-proxy-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-proxy.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/values.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/.helmignore create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/Chart.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/_helpers.tpl create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-clients-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-clients.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-proxy-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-proxy.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/values.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/.helmignore create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/Chart.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/_helpers.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-clients-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-clients.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-proxy-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-proxy.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/values.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/.helmignore 
create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/Chart.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/_helpers.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-clients-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-clients.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-proxy-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-proxy.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/values.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/.helmignore create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/Chart.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/NOTES.txt create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/_helpers.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/certmanager.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/cluster-role-binding-auth-delegator.yaml create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/cluster-role-binding-resource-reader.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/cluster-role-resource-reader.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/configmap.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/custom-metrics-apiservice.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/custom-metrics-cluster-role-binding-hpa.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/custom-metrics-cluster-role.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/deployment.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/external-metrics-apiservice.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/external-metrics-cluster-role-binding-hpa.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/external-metrics-cluster-role.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/pdb.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/psp.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/resource-metrics-apiservice.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/resource-metrics-cluster-role-binding.yaml create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/resource-metrics-cluster-role.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/role-binding-auth-reader.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/secret.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/service.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/serviceaccount.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/values.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/.helmignore create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/Chart.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/NOTES.txt create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/_helpers.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/daemonset.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/endpoints.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/monitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/psp.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/service.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/serviceaccount.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/values.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/.helmignore create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/Chart.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/_helpers.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-clients-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-clients.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-proxy-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-proxy.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/values.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/.helmignore create 
mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/Chart.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/_helpers.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-clients-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-clients.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-proxy-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-proxy.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/values.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/.helmignore create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/Chart.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/_helpers.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-clients-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-clients.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-proxy-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-proxy.yaml create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/values.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/.helmignore create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/Chart.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/_helpers.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-clients-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-clients.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-proxy-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-proxy.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/values.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/.helmignore create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/Chart.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/_helpers.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-clients-rbac.yaml create 
mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-clients.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-proxy-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-proxy.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/values.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/.helmignore create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/Chart.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/_helpers.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-clients-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-clients.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-proxy-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-proxy.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/values.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/.helmignore create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/Chart.yaml create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/_helpers.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-clients-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-clients.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-proxy-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-proxy.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/values.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/.helmignore create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/Chart.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/_helpers.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-clients-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-clients.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-proxy-rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-proxy.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-servicemonitor.yaml create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/values.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/.helmignore create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/Chart.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/README.md create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/scripts/check-wins-version.ps1 create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/scripts/proxy-entry.ps1 create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/scripts/run.ps1 create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/_helpers.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/configmap.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/daemonset.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/rbac.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/service.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/values.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/files/ingress-nginx/nginx.json create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/files/ingress-nginx/request-handling-performance.json create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/cluster/rancher-cluster-nodes.json create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/cluster/rancher-cluster.json create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/home/rancher-default-home.json create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/k8s/rancher-etcd-nodes.json create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/k8s/rancher-etcd.json create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/k8s/rancher-k8s-components-nodes.json create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/k8s/rancher-k8s-components.json create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/nodes/rancher-node-detail.json create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/nodes/rancher-node.json create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/pods/rancher-pod-containers.json create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/pods/rancher-pod.json create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/workloads/rancher-workload-pods.json create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/workloads/rancher-workload.json create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/NOTES.txt create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/_helpers.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/alertmanager.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/cleanupSecret.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/ingress.yaml create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/ingressperreplica.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/podDisruptionBudget.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/psp-role.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/psp-rolebinding.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/psp.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/secret.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/service.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/serviceaccount.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/serviceperreplica.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/core-dns/service.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/core-dns/servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-api-server/servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-controller-manager/endpoints.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-controller-manager/service.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-controller-manager/servicemonitor.yaml create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-dns/service.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-dns/servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-etcd/endpoints.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-etcd/service.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-etcd/servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-proxy/endpoints.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-proxy/service.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-proxy/servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-scheduler/endpoints.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-scheduler/service.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-scheduler/servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-state-metrics/serviceMonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kubelet/servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/node-exporter/servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/configmap-dashboards.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/configmaps-datasources.yaml create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/apiserver.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/cluster-total.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/controller-manager.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/etcd.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-coredns.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-cluster.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-namespace.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-node.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-pod.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-workload.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-workloads-namespace.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/kubelet.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/namespace-by-pod.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/namespace-by-workload.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/node-cluster-rsrc-use.yaml create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/node-rsrc-use.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/nodes.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/persistentvolumesusage.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/pod-total.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/prometheus-remote-write.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/prometheus.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/proxy.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/scheduler.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/statefulset.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/workload-total.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/etcd.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-cluster-rsrc-use.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-node-rsrc-use.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-cluster.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-namespace.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-pod.yaml create mode 
100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-workload.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-workloads-namespace.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/nodes.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/persistentvolumesusage.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/pods.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/statefulset.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/namespaces.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/clusterrole.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/clusterrolebinding.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/job-createSecret.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/job-patchWebhook.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/psp.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/role.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/rolebinding.yaml 
create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/serviceaccount.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/certmanager.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/clusterrole.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/clusterrolebinding.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/deployment.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/psp-clusterrole.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/psp-clusterrolebinding.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/psp.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/service.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/serviceaccount.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/_rules.tpl create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/additionalAlertRelabelConfigs.yaml create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/additionalAlertmanagerConfigs.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/additionalPrometheusRules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/additionalScrapeConfigs.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/clusterrole.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/clusterrolebinding.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/ingress.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/ingressThanosSidecar.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/ingressperreplica.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/nginx-config.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/podDisruptionBudget.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/podmonitors.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/prometheus.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/psp-clusterrole.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/psp-clusterrolebinding.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/psp.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/alertmanager.rules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/etcd.yaml create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/general.rules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/k8s.rules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-apiserver-availability.rules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-apiserver-slos.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-apiserver.rules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-prometheus-general.rules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-prometheus-node-recording.rules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-scheduler.rules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-state-metrics.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubelet.rules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-apps.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-resources.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-storage.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system-apiserver.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system-controller-manager.yaml create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system-kubelet.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system-scheduler.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/node-exporter.rules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/node-exporter.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/node-network.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/node.rules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/prometheus-operator.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/prometheus.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/alertmanager.rules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/etcd.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/general.rules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/k8s.rules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kube-apiserver.rules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kube-prometheus-node-alerting.rules.yaml create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kube-prometheus-node-recording.rules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kube-scheduler.rules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-absent.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-apps.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-resources.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-storage.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-system.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/node-network.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/node-time.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/node.rules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/prometheus-operator.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/prometheus.rules.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/service.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/serviceThanosSidecar.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/serviceaccount.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/servicemonitor.yaml create mode 100755 
charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/servicemonitors.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/serviceperreplica.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/clusterrole.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/config-role.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboard-role.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/addons/ingress-nginx-dashboard.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/cluster-dashboards.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/default-dashboard.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/k8s-dashboards.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/nodes-dashboards.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/pods-dashboards.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/workload-dashboards.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/exporters/ingress-nginx/service.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/exporters/ingress-nginx/servicemonitor.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/hardened.yaml create mode 
100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/validate-install-crd.yaml create mode 100755 charts/rancher-monitoring/rancher-monitoring/14.5.100/values.yaml create mode 100755 charts/rancher-node-exporter/rancher-node-exporter/1.16.201/.helmignore create mode 100755 charts/rancher-node-exporter/rancher-node-exporter/1.16.201/Chart.yaml create mode 100755 charts/rancher-node-exporter/rancher-node-exporter/1.16.201/OWNERS create mode 100755 charts/rancher-node-exporter/rancher-node-exporter/1.16.201/README.md create mode 100755 charts/rancher-node-exporter/rancher-node-exporter/1.16.201/ci/port-values.yaml create mode 100755 charts/rancher-node-exporter/rancher-node-exporter/1.16.201/templates/NOTES.txt create mode 100755 charts/rancher-node-exporter/rancher-node-exporter/1.16.201/templates/_helpers.tpl create mode 100755 charts/rancher-node-exporter/rancher-node-exporter/1.16.201/templates/daemonset.yaml create mode 100755 charts/rancher-node-exporter/rancher-node-exporter/1.16.201/templates/endpoints.yaml create mode 100755 charts/rancher-node-exporter/rancher-node-exporter/1.16.201/templates/monitor.yaml create mode 100755 charts/rancher-node-exporter/rancher-node-exporter/1.16.201/templates/psp-clusterrole.yaml create mode 100755 charts/rancher-node-exporter/rancher-node-exporter/1.16.201/templates/psp-clusterrolebinding.yaml create mode 100755 charts/rancher-node-exporter/rancher-node-exporter/1.16.201/templates/psp.yaml create mode 100755 charts/rancher-node-exporter/rancher-node-exporter/1.16.201/templates/service.yaml create mode 100755 charts/rancher-node-exporter/rancher-node-exporter/1.16.201/templates/serviceaccount.yaml create mode 100755 charts/rancher-node-exporter/rancher-node-exporter/1.16.201/values.yaml create mode 100755 charts/rancher-operator-crd/rancher-operator-crd/0.1.400/Chart.yaml create mode 100755 charts/rancher-operator-crd/rancher-operator-crd/0.1.400/templates/crds.yaml create mode 100755 
charts/rancher-operator/rancher-operator/0.1.400/Chart.yaml create mode 100755 charts/rancher-operator/rancher-operator/0.1.400/templates/_helpers.tpl create mode 100755 charts/rancher-operator/rancher-operator/0.1.400/templates/deployment.yaml create mode 100755 charts/rancher-operator/rancher-operator/0.1.400/templates/rbac.yaml create mode 100755 charts/rancher-operator/rancher-operator/0.1.400/templates/serviceaccount.yaml create mode 100755 charts/rancher-operator/rancher-operator/0.1.400/values.yaml create mode 100755 charts/rancher-prom2teams/rancher-prom2teams/0.2.000/.helmignore create mode 100755 charts/rancher-prom2teams/rancher-prom2teams/0.2.000/Chart.yaml create mode 100755 charts/rancher-prom2teams/rancher-prom2teams/0.2.000/files/teams.j2 create mode 100755 charts/rancher-prom2teams/rancher-prom2teams/0.2.000/templates/NOTES.txt create mode 100755 charts/rancher-prom2teams/rancher-prom2teams/0.2.000/templates/_helpers.tpl create mode 100755 charts/rancher-prom2teams/rancher-prom2teams/0.2.000/templates/configmap.yaml create mode 100755 charts/rancher-prom2teams/rancher-prom2teams/0.2.000/templates/deployment.yaml create mode 100755 charts/rancher-prom2teams/rancher-prom2teams/0.2.000/templates/psp.yaml create mode 100755 charts/rancher-prom2teams/rancher-prom2teams/0.2.000/templates/role.yaml create mode 100755 charts/rancher-prom2teams/rancher-prom2teams/0.2.000/templates/rolebinding.yaml create mode 100755 charts/rancher-prom2teams/rancher-prom2teams/0.2.000/templates/service-account.yaml create mode 100755 charts/rancher-prom2teams/rancher-prom2teams/0.2.000/templates/service.yaml create mode 100755 charts/rancher-prom2teams/rancher-prom2teams/0.2.000/values.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/.helmignore create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/Chart.yaml create mode 100755 
charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/README.md create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/ci/default-values.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/ci/external-rules-values.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/NOTES.txt create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/_helpers.tpl create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/certmanager.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/cluster-role-binding-auth-delegator.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/cluster-role-binding-resource-reader.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/cluster-role-resource-reader.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/configmap.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/custom-metrics-apiservice.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/custom-metrics-cluster-role-binding-hpa.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/custom-metrics-cluster-role.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/deployment.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/external-metrics-apiservice.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/external-metrics-cluster-role-binding-hpa.yaml create mode 100755 
charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/external-metrics-cluster-role.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/pdb.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/psp.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/resource-metrics-apiservice.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/resource-metrics-cluster-role-binding.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/resource-metrics-cluster-role.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/role-binding-auth-reader.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/secret.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/service.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/templates/serviceaccount.yaml create mode 100755 charts/rancher-prometheus-adapter/rancher-prometheus-adapter/2.12.101/values.yaml create mode 100755 charts/rancher-pushprox/rancher-pushprox/0.1.300/.helmignore create mode 100755 charts/rancher-pushprox/rancher-pushprox/0.1.300/Chart.yaml create mode 100755 charts/rancher-pushprox/rancher-pushprox/0.1.300/README.md create mode 100755 charts/rancher-pushprox/rancher-pushprox/0.1.300/templates/_helpers.tpl create mode 100755 charts/rancher-pushprox/rancher-pushprox/0.1.300/templates/pushprox-clients-rbac.yaml create mode 100755 charts/rancher-pushprox/rancher-pushprox/0.1.300/templates/pushprox-clients.yaml create mode 100755 charts/rancher-pushprox/rancher-pushprox/0.1.300/templates/pushprox-proxy-rbac.yaml create mode 100755 
charts/rancher-pushprox/rancher-pushprox/0.1.300/templates/pushprox-proxy.yaml create mode 100755 charts/rancher-pushprox/rancher-pushprox/0.1.300/templates/pushprox-servicemonitor.yaml create mode 100755 charts/rancher-pushprox/rancher-pushprox/0.1.300/values.yaml create mode 100755 charts/rancher-sachet/rancher-sachet/1.0.100/.helmignore create mode 100755 charts/rancher-sachet/rancher-sachet/1.0.100/Chart.yaml create mode 100755 charts/rancher-sachet/rancher-sachet/1.0.100/files/template.tmpl create mode 100755 charts/rancher-sachet/rancher-sachet/1.0.100/templates/NOTES.txt create mode 100755 charts/rancher-sachet/rancher-sachet/1.0.100/templates/_helpers.tpl create mode 100755 charts/rancher-sachet/rancher-sachet/1.0.100/templates/configmap-pre-install.yaml create mode 100755 charts/rancher-sachet/rancher-sachet/1.0.100/templates/deployment.yaml create mode 100755 charts/rancher-sachet/rancher-sachet/1.0.100/templates/psp.yaml create mode 100755 charts/rancher-sachet/rancher-sachet/1.0.100/templates/role.yaml create mode 100755 charts/rancher-sachet/rancher-sachet/1.0.100/templates/rolebinding.yaml create mode 100755 charts/rancher-sachet/rancher-sachet/1.0.100/templates/service-account.yaml create mode 100755 charts/rancher-sachet/rancher-sachet/1.0.100/templates/service.yaml create mode 100755 charts/rancher-sachet/rancher-sachet/1.0.100/values.yaml create mode 100755 charts/rancher-tracing/rancher-tracing/1.20.100/.helmignore create mode 100755 charts/rancher-tracing/rancher-tracing/1.20.100/Chart.yaml create mode 100755 charts/rancher-tracing/rancher-tracing/1.20.100/README.md create mode 100755 charts/rancher-tracing/rancher-tracing/1.20.100/templates/_affinity.tpl create mode 100755 charts/rancher-tracing/rancher-tracing/1.20.100/templates/_helpers.tpl create mode 100755 charts/rancher-tracing/rancher-tracing/1.20.100/templates/deployment.yaml create mode 100755 charts/rancher-tracing/rancher-tracing/1.20.100/templates/psp.yaml create mode 100755 
charts/rancher-tracing/rancher-tracing/1.20.100/templates/pvc.yaml create mode 100755 charts/rancher-tracing/rancher-tracing/1.20.100/templates/service.yaml create mode 100755 charts/rancher-tracing/rancher-tracing/1.20.100/values.yaml create mode 100755 charts/rancher-webhook/rancher-webhook/0.1.000/Chart.yaml create mode 100755 charts/rancher-webhook/rancher-webhook/0.1.000/templates/_helpers.tpl create mode 100755 charts/rancher-webhook/rancher-webhook/0.1.000/templates/deployment.yaml create mode 100755 charts/rancher-webhook/rancher-webhook/0.1.000/templates/rbac.yaml create mode 100755 charts/rancher-webhook/rancher-webhook/0.1.000/templates/service.yaml create mode 100755 charts/rancher-webhook/rancher-webhook/0.1.000/templates/serviceaccount.yaml create mode 100755 charts/rancher-webhook/rancher-webhook/0.1.000/templates/webhook.yaml create mode 100755 charts/rancher-webhook/rancher-webhook/0.1.000/values.yaml create mode 100755 charts/rancher-windows-exporter/rancher-windows-exporter/0.1.000/.helmignore create mode 100755 charts/rancher-windows-exporter/rancher-windows-exporter/0.1.000/Chart.yaml create mode 100755 charts/rancher-windows-exporter/rancher-windows-exporter/0.1.000/README.md create mode 100755 charts/rancher-windows-exporter/rancher-windows-exporter/0.1.000/scripts/check-wins-version.ps1 create mode 100755 charts/rancher-windows-exporter/rancher-windows-exporter/0.1.000/scripts/proxy-entry.ps1 create mode 100755 charts/rancher-windows-exporter/rancher-windows-exporter/0.1.000/scripts/run.ps1 create mode 100755 charts/rancher-windows-exporter/rancher-windows-exporter/0.1.000/templates/_helpers.tpl create mode 100755 charts/rancher-windows-exporter/rancher-windows-exporter/0.1.000/templates/configmap.yaml create mode 100755 charts/rancher-windows-exporter/rancher-windows-exporter/0.1.000/templates/daemonset.yaml create mode 100755 charts/rancher-windows-exporter/rancher-windows-exporter/0.1.000/templates/rbac.yaml create mode 100755 
charts/rancher-windows-exporter/rancher-windows-exporter/0.1.000/templates/service.yaml create mode 100755 charts/rancher-windows-exporter/rancher-windows-exporter/0.1.000/templates/servicemonitor.yaml create mode 100755 charts/rancher-windows-exporter/rancher-windows-exporter/0.1.000/values.yaml create mode 100755 charts/rancher-wins-upgrader/rancher-wins-upgrader/0.0.100/.helmignore create mode 100755 charts/rancher-wins-upgrader/rancher-wins-upgrader/0.0.100/Chart.yaml create mode 100755 charts/rancher-wins-upgrader/rancher-wins-upgrader/0.0.100/README.md create mode 100755 charts/rancher-wins-upgrader/rancher-wins-upgrader/0.0.100/app-readme.md create mode 100755 charts/rancher-wins-upgrader/rancher-wins-upgrader/0.0.100/scripts/noop.ps1 create mode 100755 charts/rancher-wins-upgrader/rancher-wins-upgrader/0.0.100/scripts/upgrade.ps1 create mode 100755 charts/rancher-wins-upgrader/rancher-wins-upgrader/0.0.100/templates/_helpers.tpl create mode 100755 charts/rancher-wins-upgrader/rancher-wins-upgrader/0.0.100/templates/configmap.yaml create mode 100755 charts/rancher-wins-upgrader/rancher-wins-upgrader/0.0.100/templates/daemonset.yaml create mode 100755 charts/rancher-wins-upgrader/rancher-wins-upgrader/0.0.100/templates/rbac.yaml create mode 100755 charts/rancher-wins-upgrader/rancher-wins-upgrader/0.0.100/values.yaml diff --git a/assets/fleet-agent/fleet-agent-0.3.500.tgz b/assets/fleet-agent/fleet-agent-0.3.500.tgz new file mode 100755 index 0000000000000000000000000000000000000000..215f2864bb223c35203e5fc40c276a3ee8030b6b GIT binary patch literal 2329 zcmV+!3Fh`6iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PI?CZ{s%7&ue{(fr4V6=MX8j<2Z%h<+9sid)Zu&YzrLP#e$Yb zHqR8Pl9W@|^>;szlw?^};>~WF>;j$_wnWYhhkt%ELy9Rw)B%|w*T)xAB6YYR89R7V z>h*fP(Qs)0d%a%!zju0ede9&ChQrg*U^F^A==D#A{r&;;p4NCqN(62@&IC@2@{hrCR#HLsW^^NYMN3^B9Mf~Q(C=j7Btw;x z7$b12PA!zitdm!$2pHwXADy)l84`to(O4<7OV}eh{RO3>f=4hPcqGs3*IsxMo_YzY 
zSkheEq92STyd#`UP{O&<00I0+^}llkM8j=N7f%keBx~zH{UR&*YjR_@(&IUn)sg+R-k%%2e1SGgR?=ujsMYbu*d&%5Y5O0 z4e|}*{5#4Mai@+f^bCq6=0Z^| z!oP|!N{$*8oL|2Mg>sG(WWhP*6U(s)#jH>o#s~^EOi`kD2ygx%Sc5(ihHiq6IyiI{&ssXcHFMw*G3W!og zh6(5?Rc)Q`FK-Qtq7#k@j2A%Q4jF?P7k3;k&ck&dF3y`>h*_buu?8npX=(kn>xZYs z%=%r+dr8K~6leh?Nz9*a2p75Fn;HaXi_kQ%03s2%&_)guAgK{B!YPS$=p8bvIPwkw zQXwK36S=mH8o&AWbv2Mxd&UZ-L`;cBxTAXNIo7wYn_a7;LRCzMnt_iK@trSw5omUu3)XH;F=%!J1)ecmZf+}Kphj48p5G2BmT0A9u;#S%inYc3x zSDWSw&QK{>hyo-M#uj$2;ciN_Ei^q$1mzQta*dJ`X0Yg**)pS!mziCH$&CwxS6hLm z8W54-Ei#O?kU+Ru5DZ-x6{NyVu$6!Wb0!v<5w?nKEHtJxbwsdS8N{!(MDXheZ&r*k z)@+O00|1!S2xy1(rW(r{zzi1=G?M^Z4j^*FX$k%af&QQsY0DUK`O8- z|34Y@TJit%?5wws|Ib0oWdI4Llq2|VL4Hi!iu_o5%Y_fYYULS5nnL)COA_6Zu5$9u zrCf{Ul5(Ab|D8JaKT82Jh4qWSwg-9Aetzlk{MM)&|FPgHon$0`lFh(Q{GSY4_#d70 z_WA#_(2VjVf(tv}??}G6f$n=5YLXC5Y;fPoGpZW5Tq5`@u<>YFhQj5t=`mai39D6E z>$?I!S}wKtNeSNKT&<4QdJMYPl2sn*NB(kINmYfbl?%nIuVH)r)l)bA*XiI>IDj4a z@1L9v+W0>m?D79B)WkcHPO?kwmYo1pf#^MUx8Q4W!={Y60HzctF=e7vf(=WU#+5cr6!Zr>Q&v^ z_T;#7g>p_~Jda~haJ{`@to&?XciFWgUT+1%=GL)x#9LdqN{bf%AsK@Z{sK;UVs@C{ zuf?i#ZKL46hBr2`wZ!&D^U+PeH1fU<>h}K}^_`HjkGWtpUVJ2si97TAdC_2J{x@p* z|3QCvGTQt9=b#`6Hgdjl<|&_qv+wPJNF5NaT-CK*6CE+3bZ3EGE#|kZDbKK$%NpL* zSy$LP=rXXZ+<7jN*0~eq6N$=tn!uG{yTGm2_lq3M(zi~=KEH8~FFoR47j@%5H^IA( zPi6EVE_yuP2kgrKMrWfI{s)88{rBHzp*H@$NhfQx_O3H4T#~?Sq%inxb zXptF)+l^6wFJ{Pp^CL(JQ@HX!taE!gQKrd&`$wM`D?Z$pKGx&%tVwIt>fe^J0*pyK z6KQ%$GpZx#KS3<(_WyEB;-|X@u*?6O=hpfE;N*1g|DS`JzTeI|DfCoG`lm(6Rt|XK zzJ%Tgh7X?#lpiaDW|1^fvS21T3xMSGM=6TD%+y}}2$onkvTs#aP>cbDB2&YUjaBEBy}3@5N#5 zLsu)W8~^Ux_)~d+UHiY&llJ-F>1Z(A3eA;=xN1{&G1U4Uxxn=zD^8Sdl%vrr5Fow4QN7q+d8&yP`*@ewk_%EkkFmUOw(w8qk<}s_hh`TIea$g$$9!>B>CKYN|RkUHx&U|Ourz-P|mlZ>QU%*`Z3 zkzLDD)Svy&NlE+0DK{S1O3hkP^NG&RGuJmEYC;&ad!?FGU)XZ~QkgGEn_5*E^c<1VDb7S=P1i+(@t{Jjpm5tpeogckG3U6p?Ig!F#EpS zUbGikPf+YNs9!zaKMn68->T2>M`{P-&E{=unKaxCy}ydTuf7Y_Y(CVyKN)fNZN5Ff z3xc#5hf@Gst$>YVVt*zt9k%$lfxcCO>_=L~)*ROHaZGCy)Em zo44541Y5EuLdoW9z!&V&gSKnTry!#x0eR6cgtn&A=fJA%FT>Hy8#!4fQ4KhCpOIve 
zc5fm+{VUkd+;fNlM~w@TBY)YiMGC6gD6{vjCW#^GZQIlvx(R?RSs=S-^WMx?JyT{7 z(A4m3a*`h1ar$4=tLhw3+jd#^&4~M-58<$B0$_sOI*hp6?TUU@naf@O z>5MMKzi92?a9$BLt;;l91fLr5=j^B8`UHTIp98qco4pPKAohyi^(z!jeU{e1k4T2D z??p_m%AMF4QBkyb)&fXggI}%bC6D6V2KsF0?oDn0S5AsdiuW`HZ#dv0xT*r1fJ3Lp z6oM%H+?=VwrefI2xVXySJfH$Je0>hT0Y?`=Q!SO{-tn zARo^pLyn`S@%a9}BwzDLCjqW7tP2a`tSnqo{1uEni9>Qq>8;)6t}KR6`#Lkm7TEEV zdN@r+m_2Hu5d2y!8Ch2h9o8;L1R6#kM?Y?}!w)@s;i#y-B_>?JyO>J)acjq->)tI$ z%QcK4LH7rBh-TeRg6=S0F)I|nJX6S-LeX`^P_7S7=}^@&Ijw8~7`A@4P@4BNd}lnJ zV_2JWDZ=M8wh6PxfHK|_M)t=7y6Ho)b#nN^0Jq_93<(8kE`saZfGli$;ltV0DV*zc zU8rMsS3>#ey7hoC#F(Tuen4mp3Q;N|cA~~`LJxF8H31NKTasW*%}8K1!=@LYB>7egt@AkP{W!uR!=Yh;!6S z53c)p4B=m?X&`}=B_G^gxC|!pyc2ZuuESba!qzK(k1#jY_KW2-49W#XI3bw&?m!j^ zmQBL7Yfr#$dVM_-W-Ah^dc0!}nj;RbNgkMt%qM?*cuM7?r*()cF?CFq+fg3lEoEtNt|AJ{YCu3#IpaicKg2L&8dE8!hqjpW z4&$yP32oj+x{>_c0TxN{CU+*dfX5T`7Wh+NweHm+%JFq`NKjPzUe-bt>DXXu0+h~+ zG%;3L%htZt4@t!-_eatk`Zh+vLV8@z`O3mwXsf_7I)hUXMvyYpQ8@*By|xN3Chzn{ zYUQw5WogiEm9D?8w!!2+!#iqD^yN1FGOupF)xpj6J$2)>@R-2+RzgwpgNEOq^si2r*@i0lnaWSVfU``?zu#&yEHpVO3;IhrrAB~tS(A#O0hCXnjNc(+Db4>odvH~s~C2FN}FE^K0Qt7 zf(P;H(^NNYUEmplU+_b~l0>`2K;8`~cZ1(Szy{r*{-{2?YkgxwUTqfaygPT1?c3W5 z5r(oh%}u&ROj#j^j3oS5*{uAKi0#=w)GFTU&FH3e#NVDT9J!6>ub=$Hfs4Z&R!Lc5 zt2R$1l;Zv!)VEYm8-$`TB-8Uth%RsmEx39U8%I|bTRlH?91Va(K>}7onf4pVj zdDL@N;Mm6=;0Q7?2ryB3*t>sSS;0C;?iGG2lTRZYtgyu3dC5Wh(w*@zJqh;5)8WfA<=?>=g+z`2LL{Mv+Do?V^i^>IofUioN=KNuOc9IU)O<%H zePl7<90JGHJBud^+*7I@eH_nDsevrr6iJ(Pv-hFs2V~5yV>tp^d zGsKgi#^@!(yQQNfP5Q*+ zY*XLRvKvj^n!wACC_}h++uPw&j=%I*FHCg;51LOuwq2YH+1fD6P~&9~j(W;a?KkAU62?IP*k%);^CtgcJ{PJ% zK(0)1_CjRB64}uG`Hk1Es;9U=SSKUz8(yhSOkBX*6JZ_B)^|ckQ!?Ko&5Snh*NzB9 zL-fqO((yMAERQ}x*A<7@r3Z!XXm(LM$5)U7NDS+cvpZ&jqhtt85jnxj-4dq9Mc18| z4(lQLJ;)9QYA1BVVk#Ty6N%|Dn#DQjZ6<1WIQdtj0?dG~rukv~i=)~|65j0Fmiwy0 ziG2J)y2ro0pi{3C(SN5*Co(!zX`Z7R#M4QyX&aKhp?L; zf2P@dl=oJiG=-BE_BIR0{gOpRe>B=?c;ZP29biKo-WiW1qO5AnK%xeGzxQ|1^kqs0DfLDo`k%7Je~w={RYdz0#-;S^lxb9 zKme336q!{}KT5vZ(Kh2VGwdHGQ9(S&z{^PeZ>;S1)>!)CwlO==t^u#b@%TxK0QeaB 
z(gyW?!NjAi84Js*NtufM_|E=)v31u(zRnXbc^}fE=_i?N_R9|a7s)PNh4wxit_;KE z+H<=L%wW&b7+Z7#8Gv(d9$O&sAeMQYiDRgLt3L75BQU3lSC@SauYTcJY8RCbS86@etcNfy?D8H>7U<3u(9@@LFsM=}dam z_0qrIxgInPABgbV%zu|x4Y(dsn_7CKS-ste{k-bKwgToVx%2I2qiYO$lLne{)v|L4 zEWrD&u?2nec(L&qNy(?Zcac%f%3HK_BppAmd=KuC+IK7EGwCpXHzN^mT_yAv7xLV3 zk~ev=v1{9~(5$R!=}k@(mmduVz%kxALIZ5XX-~uNcnonETu)O$fh&x+qxjrelYTgT zD2lU6Q^!WSZx6n&h_-|_9t;Z_;4lJ*6eOF`e`AWQK{u0QxrJ+)oQ?7CZ2 z*sAY~tI1+kGom6GPtOwtt<=S-&=2LN8`L{)R7t<~bN+VD9A`}U`;FDB{*9=JR(d&q z41P+=f%nXPB+mMrCbaS+-ict-2kvC`7;*IOMn5kppJ+EHm+yLW~ zlry#Cr?SXL&QRdbqo_2RDuu_kvGN&N((EDBm0j*C9{0IiJY}Y|HPh?Mh7q>D9B@Oj z?+gOQt>mer*cOUhqETs z4lhnVvwJO%*9`E5pK0v$kOITa=ME&;F9SVx`hjCWol}gR5|N4S-I)R6!%)UfBag2* z1uZ!tHtv=#gOkGIWafDi@DdQ#Td)(aScORxbbOi~Z6r{4dcR#?vpJd+IPL zU@cv&4HOn}dXJno*l^ut^VIZe!q5*7&KJ}b4X?P5WuvlJ)M;+UnhC+5`GZI{B`U+I0>a?!eChDUu{E0(J1XWDl@w6xtl*IpB`hjo@3d?X&Og- zIq2(QIOPzvUp{2QAjIxgSmw)Vee}hj`q*^$5jJxl*5&`FmOO9j)dcwYU5@@AddXsq zNr!)$u~$|R{{L4m2_CV&WfO9GC(B6@v;NM``T9D~pP^>Zxwc2^)bs@DHDdp}P9xz9|NKK*$+p&1y0@pJkoY69-} zbnGL}6gaWYjUO{_-X%C5ssmml{5-*IfuyFIxWvEkalsln9M%glVXX{Hcu`Tb%OjuAzBtBpq5H z(f?0;@#8;kX24*t?L(7TB2nkR>KYpy2|k!I!4($|y3jxsV*0X$5hOX}b|I9!X$VxG z?vNvrE$FwCie$Z~PCInDF1pR=jv;=Px_>R_!WR5eJ6#Vo1k!fm5@YVwDoL zM^OJ~G};Du34?w%=b^e~gwdRA$G|kJli*%W4CehMcs1do&~7bq0XwH?4CR6&AWrcv zpcg7@z06S{e0i!4bBKpBP^A|o!HyJq);a$=uI{jkyG$y)hXxswlF3%+cm8{Mh92j9 zma&nX(dd2URw9LV*JRDEx%_1l#YJgP_IDK&wf9n~%LTL{9v0}#0XuGVM4?=UhNAIc zri&ul$*EF%``NEG2CVeqX};M96n;**Vua2PYv;A*NkFcj?HGT2aqdwCE_)Xd-R{iO zU9iVl*4X!~mUeR<3}G;%w{9()jTeR!P=$%qI)!)h^OXF|D3q4b!&4pGtJ-@#J$)}Z z-@c>aIZmwz1f)*in)EB@T_5(IH>IzbDY5Dy#ttJ~IH_PTQ3#_{#jRqD55EScQ8l2& zDs~?lg&`oNi@}Xj<9%i6tc-pfmkewWtDHFAYPgD9I`M2w&KHOficXD?$EdF>$B!!b zig#MkVXHb-9p$0ZP+9nuSF)99MTd1rwzUF_enn?7&|^Mg*Rr7!NYen{P`LuLqJt>` zy`sbWfd{e|eEmT}@PSzC!Hdc<_3yVac@Y12*@Dd3xS(tCrhmvpU<)rh(darc3lssj zt)jz+_8^j}@}H?OWp^a;d5zdAlO=zAH%9cxRelNIdNp9NjxpHwG`P}3ZSEIB#s zr?L>7GVJ#MRP9W-@_f3;Rg?QO^0iR_5&cY}C$B$KfTg6(q5lw89o2EW?K!W6axJAP 
z#?NLI_vl*YJD1fePxJ~;ydaEgZqs;$;cB?$l2|-fS@~J5-uGlGVFpW7y3p19zEq^@5q?4S62YT{@}zdLlNA5=toYN)vQEehRI^ zY8an;&Un&{E9_{`b zfU)KVN=GQ~$Y;H3y$eu^6@urQSPXgGzd}kn7nxg9lI1}hLM_8=?Ll!1+HjMy@FI;; z?(aTragZF3gk_CiN47efC zp?}$ki_)3Y8BAa|J_&z%ze8unH4@3Ghi5HI(F@)rUFkwk&z8s9`E6}IMy_}Qx%&xj z!@e0*Q5W11bpGR5@A6`}aP43wFE7H~! zB=w(=2?O|w8jP{--}zQ3maIV5qr93D${hXM+BCRSOdVAH7sa^B4n_F*!Iew78}Eyt zz=1Pb#a|>++!QM`bd=mcgiv&2TlJ})LnI?x^(7b{h6fs4bQD{4i4YiKT;If$oSALW zZy`NQ@?>)?UEfG{9VRGCgRRW`I$Ury5Lwb%rKaAswf{!JU$iS{=?0gFE1NI?G0i9? zQ>~NsU(S@k@S}$*9qEk2$PHc>f!3k)TBXG_{46&3{cQuy$qHe_?Txs3$hLhR!Ie$7 zZL-EMR}8R_eI)v3T#?tF%Fj6pc8{-cw9Ams0S^;@p1NF_D2fZaDmLek}41#HG;W9-5xKdl&u7~=gY(I zCXor%C~fP5#iLhoEtawa=T^RhRzm6jG^b+lqY__QNT~`&%`s23?DROD`exhzPPt$c zs`P5flvT)>9+2m=yTcMZNi{kPd055`OAw^ z!XAV-Q}xM!OTPD~oko14gWGc^9%LmH(&vcHU@KVU#-!jM84K`>!yrHbPoH9cR6esJ zBL{cki~*|%qf0N+{9hZp7$oaQUEZr717tBBe&A_GeT&*@N6dVrspwl7pYo&oCS*yR1$2)T52iYHfS7;OtNlKv}#r#$4-w!vVI56 z*K&%;!{D$b6%vfECch*V(u1Eb*-c;Y|G3pu{#JDHzZ8sfP7>UY{g-Lk{9*oRIW~p) zTKeT6bOMU8KMEfX8DHy(bae6xj+h~go+Gq;^+tk&M-yCA65+*|keb^8Fxzbit8;gtR4p_;N`GQK4sH~5pcE&eI>T8ZC$JnQ^e^ugdM)gw(iIJrK_63g+tY~6T0j58PmxE!?QJY zq4WJ0f@|Rc6lSPv{t_WQ2y8qIw%hJMSwDyZ{Sj)hqY&VE81f~d(NQ`Sl++F{$m=CP zY!oK+Bdrz2Xba)G-4fN;BdG6q28SD*=U{>ex_z%$NHvH$nYdJO%T;FMX5hU06Yo5&+sddpMxRr znXs5#ejt_}z?UGGFXOw*;Y|(|wW-}_`N?o1xC;NJao?ev`#YDr(Py5qh5;{Cw=5<< z9OCB(mgsYs7rHcVeqW)Ed<(fq-AmzigY@7T?;T$5=z4!oLqj-{<{^goeH!jdvTld^kx>E)+_k@s9Fp+B#MNJC&)gTCgc;bHMb zaR{)C8hV2@N%JN3&C3JBj6+Lch|TblP{73Wm33o>Irvfk(B(PL1oK4)2g**JWyTGQ zeG|$@{8R@g1mGccKQVQhoV~;821?mfB3R7{cnM={TKkd0tPRhwf`ZubS06f&nwbtb z75jrrZXC0+u84i3&$GScb;-WTOfh$BtsE)U(E#NOX@SXW43*h2laU+rN1h`jfWH57 z9~9!{t!3OoFEx11E76((B%#cyxd4yC{vh~9MU3zc69x;8s<2Y?*X?djWVOC78qs_X zS0xX||GD=5U}P9(^QKCr9NQAQ1!B0lO1niQc82Cp|8pyx8905Lvb{`-Y4GN`+8b-EP7b!>WM2{yv2hw|9`Kc9K9E!WAx~xb#=FUpLod4BVLB)`NtAO(x{C663Z=p0p#9V6X zg+CrPmeCfvUmqtuIe8AUprU6xa~aKFF5O8bLsr2|5ngr;!DI) zl$*e$M}5%d%pDo(eABQl>UN=bEtRV(UD8*8{+JO>03x6*Rgf@gI?LRtCofJS%PK-? 
zijbv?Q5|9OlB=o6&KX|jjgm2!DI8)6U?w{Ktr@(o9fk|x&5vLDyB)KM=P6_5R0CD& zokHI(azyt;F0Z1sR(#~W`64N**=St zwD%Wl->o(05^R4js%0lNJeRFBWp~crV79%Nv~h<@O4>@(W3KL>T@67rvy#Cg(nI^3 zR16Z2pDGe})NG2aOw!LdU3+&3ov&Nt0!x-*#^q|q&v>V18|CV7p6DciR%+jGY zOdB0t2p?j!YWdgOn-&ib5woLIR@YUlw2hlJ61CNbxOM{H$9jMkjNmi%8+$fgW7>EM ze=~8ocW|kgTeIW$+zDASqp3OrJ!C#b*S2~U8`nIckM>t%Y&I^$Sv*!^F5vSY;`G*c zEe%s(Sd_ML@1Sve3LLdB#ZIB;16`pZ>^R9T3aKTm4{IJ9tPEj%C%S^R6~m zSGPr-2&!UTLI|trCOK{D{%a3LCo%0UAlP7x=!C}%>f_s$N>_-d%UEN(onvm~%a!8a ztY1UW9X7O=cw0whQ`E7)=u!?#1fePYF$@Whr0I1kwo;HV~(RU;D?dWZ57PM|w+Ba8Fx*Lb8|ZKoED(gIbHyg@JKSl>1)(Fdib* z(Cdm~mw6^5kDcAAImMXFlxKw`i(2U9CPc@3Ip=BOowk!d@K+`}TiPdO2Swb>jVm@R z!{|v}oCsBLNwyn~c0W<3%HqQ9gJW31pq)RnEJz=j#NjU|uVja=6o!wunz*Laji+ia zdy!k~`IO;Km3^hA$pw?0#1&vV=MJ4+VGtnD51%=cvGU5Q*b;RRmyW49Z9B5v$fPbJ z`NBs)I2Ca7&%)l4X)@+i4BICA`{nt!7TR942Js2?Z?W& zOC;G5^@@osMT|hR+6q*IiL(3*WC&vXlib8ixW49s$57NcO&4r-ksw?bV)sp!H5shj zDJXR$y#UUMJ}L^c%FK1a5rIRqD%sj6G67R|1=ri)1l86L)&qkMPh%e6^1zaS5#*`u zy{J~>SvjXXC}e+j$Dj@*Py2r#_^ZxEU_9c@Jr^_l(}*;9Xwn2z>lWepeZ3?jPn35E z#h%{ct^`|-+H=CBE1H#TP* zG}GGg>KZP-__bg8Oe;k@BixODq%8cGk&jmDc zVQyr3R8em|NM&qo0PH+}Z`(Ms{ac@6o`Z+JSJblXI8L9?S6nXb?iJTdpE-`+=k^%d#E2j^o^3Jq6N;oEeTt&JPYJ8ZboNI!}ny{+Oif$pb(T z1VOLcwf_e}Q2)Ojv^!7Q-Jsh&>2-R&(X0i<(bs#iV=-b zHHRY^Ld0^VQ7YeZt)PwCgk=e7FXbVTAyL>gQWXMuB%_~DDk^vgqmDQ@m zsFssmjfP%TpwZm$LEt_1PCUS4fV2N2!g5p($|ITTzSGWHsC#MJdzYm}(8KSZL z5;LKw7IIvgZD%<=08NG=jGUbUup}2bV;6$aXdJ@Z!F!=EBr4?E^T-gne!G@!)8c(A z^CS2{$f;Qdi9@QiwC)RZf{^8XMkDz7@*RkQZTWNFN6As6g0qXapiqubLYgZLeFTLX z5|rp2!JDrn%^1$CD!K+g5^31$c8`q-zp~^%U%m^W(O54Hrhq&WF~(puY%0|G**A;# zb^r_q8ZgcD7^nuSfGCxx7=uozs&o4I?#iGjI^-Bbe+&d}kTJOC;)cWdnLm4-^RwyO zoI#uy@+&PVAHqOLNW=|@fkqB?$rPwWV>EkVxG}qTSk`Cm3B*{+4T$BkSAS zC#mKCmjrc!62>Bj8^Sec0faGeQ_f+Y30|8>0V>0Y4#sA}s02p($dvEVAr}(Q9pPn# zbID$X zFU^a-GOQwzrUVpbL=ufLFsig!%UGm@as@&H&49$QM5R!HPKbsZ${28=Ay?=&h8fx8 z;=)PL?wtC8`PX{!ydBJbJ?;4Io_%O{p11$cJiTy_z309fMbG_W+I%J=!aT1pgf_E~ 
z8O=L_j|oB<8C68-BbcZzl4#$7@YqUanUpIr#dM2-5@Vp;16T;P+}LKkGrJ}1qQ56siy@)Ig81gaE;>-su8RmN@NC0 z$PgND{*e>bu&vh6gwU|OToVok80iq+i>oNXIA=Js|IYtwOfyC_s@CTOS!N<$o$V|X zSmXbL<4#cX|J~kc_u&8c0k^kJh;cwULc{Id&#~LQp9?cQZa{M~@eCpzfd9#bif;E$ z4?RnFeMy!{%Jl#m|E*e$tr#FvSUxmn@*t1vzd!bT{@$Rj|IU`CB-`O(U|s&l=={9? zpL9Bh{LfxsE-%sct|^a0IJcYNCz5$7Y7!Go!ou^H86d&66pW$trrmDmgz%g7Ik)yF zKx1|sXoS$Xy@lysyTji2Fqz!mPA$cZgFib>OeWz{3{8fUN#ptA=(96LA;%Kp+TjD{ zI8~y5$Dw3U=~+r~e&z`Jy)N zi$;Kkjfm#uD=~fBSrHAOVU}eAhcq;X<}X2+K)a_dH1%(ykh(M5xfs zCgukFi4fP97$IG@KNkC|i#4P=$@@QFzPpe(pkL=nYn_vc3+N`hOV-uxzl+UNG`SQbJpC5mUk*WQejlvkuXSrOgvsP-IpT^vIww#y$JkilxFixN z$Q8;FjqogrM9%g5yGAl|aYJ=d@~#~0XA@LA?axTH_=ThleE1DGB--^o&>H`LeB3^%=YQHKhy2f8pz8na+LK%-LegKY zMe?s-D0kgGJsB1U1;KEWs6gfnRp>Q=Wb{WV@~lW`4ypbk`m-x@n}Ss$mk|~iPoEic zK0?{AG%WC`UCVFF#^!o!l%?-h_OUSg#F>4yB~y1`aU9D`%O=cLF3sW?MV?y{Phb9NkxmmTj=RHr2K=KKuec)XLp+=Wg)E%`!=_a+#b7 z+Q=^LNzbZ6Kb*!@M%j7=D?B#Q!pC-|zeA|UfAfBBH<94F`@ha{J^t%;dWZb~USMhd zzowlV82$y@wLPzj&Oi3#d*h-x_Ta&bNC|q3i?E041$}Z9pDJf8OiCl+f>zhW6SojU;F?Tmg)bltAW?u{|B9g z`~TyhchLWRfLroamAZxQ&Wd!c>UtWlwcx9WTyHbil&N{rEsEy8qw39nqx!&ub^cyp znf>2&HR!tdZ?XRKw0nH8|9gQ${pa@$ck6%U>fj#X;JWy)9n|!{6C8IB`JcVOPP5}h zxL9PuO=L2c3wQM;jXu#);v$c^thnobYL1hxl%q_K7dX zzxeTP{ohSB_!|B1bWTp{@n8G2ebE1Xz?SvjQ$_rys_!DCXlz&QU7?<@@?KVvTT%LD zW#Gna6lKZZ^e0JB`NIqoSgou}F8WHj^e0oUn&a9WT5 z+d=2>_aA$K?P~T4Gw|5LAK>1wO#l4^S?WFvu#+*?>Ho>`!ry;)4&VRV4;;Y-(HbRJ zpar-^hnoaB^mEE$%7>7V=$Z^s`QDNHI+w~b>-a$>$QTTn=({sN<-_M7kr7QtXup+J zX(v4Pj)3FPeWvSwGW$sbjNRGczy8;U51ftdXSp&b5@aZWQI5XnzrOl>rG@S6~t63&m6W}_NNoy^ZUQbmi;+xNQN!*WBRB@yftIgC((7D!GJPU-ZNj_WZpBs zPp-XZzD~{WLeio4?4RBdd?J#HT*2GdZ X0S<70yWn2|009600y&950A2t9%L5b7 literal 0 HcmV?d00001 diff --git a/assets/longhorn/longhorn-1.1.100.tgz b/assets/longhorn/longhorn-1.1.100.tgz new file mode 100755 index 0000000000000000000000000000000000000000..f6263fd6371b757b9f285dc6311a72ec1343bb0a GIT binary patch literal 15802 zcmV;rJw?JFiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc 
zVQyr3R8em|NM&qo0PMZ%b{n~|Fq*%IPf?rBEZd5jqAs?xxz~K_Xe4EZJGRUv%J#~h z%x8n$Ac;{E=mF4_m>GZ1a-QWp*|%_Q^o18mv^+`F$r@ALxKtGig{nfKkTN=%GEVnS zrijbV0?pDtZ2ReUyWN*ZNBZAxw_E-9;Bf!dANG&BM@KJS9=v?{>JQ!h!|qY{576B@ zBn?k4B;tSQ-n*|Va{nX`qLfJ_38SLd0*H}BDVua+BxQ;_gzceRvWQS2QJVI^u}c*5 zMie|V6uWj3nMO6`zp%@1+h=q`N)>xd0 z9;AfkH;rF2&gLY+BAU#3(VqG;-ydO#_8V()oMI%fg*IIUZBC+?%`!$Yl|fOIk@uJj z!e|fX`z@4Z!JnP|&VDPwBIYEM`kf;P5CI4hA|xlHT;c>qDT}W_NXF3wgP{YA8Hc~- zBg`q5Sae!brr?}PnTg)s-h{|$KI+75w&&-(2aLq3KUb8a>y9a&3(R9iC8ko>jA=aH zLtI`neziA4LSn9n`7@5CD4JER?Zqr*+`o&|yGXz5WOUNH!i#Ih6VYo$1$iQeQ7a0u zZAAuO^%l=btUePF3(|^!jz#M#A7M|&r~m6)Maq|*Zp6D!TQfwcM1*3lyTUU>(jEkF z|KDKrTL5KF3=6g@!!B6PW2{@)J>72WYm3u=y2lfEj?x^9FQNdfUjO%B9KEQm z|A*aIkL&+KJWt>hk5QgVFbtqq5ib*4PvCM&gjs<8cKqgbG-iB;q*{ZS(YNbG)+=ALa9|aWQx@@EzNHLjx#2RWU7)?JN9OTbH;HJxr&5ERbgvtOEmGzu=?A|m^(M_ zWK?tOIk|JgIa$)CTMgda6Y|=sQtIyGLHD4$f3W}3jG0LYbfcLDK$q(Oifpbk&q`T5~2r^=eY9n0%s|S(Fx0y`El4%B#Bc*W)6!} zoMMU1`#qxqNc}jQ>{!goz zBIWMYmw-L%AMF^|KLle_{n)bT4zZMkPBiFhX6M-@;uCBe2;ZD=oM0*mN{2Y+t^`U` zc6|{L;lq@q_&rPW89v1toy3%kIYtuO#p#rYtJg?TAUS6V7N&@W=@dycKyq4p`I^Nj zC34}=)+i0xSRPYJj>ls{{YPsEy`5t|aI_RI0TwEcS*_GDGR{9FbHFUbR@B zO*l&M$rQ&|n3v$HQ+<*nj;O>qIn&GI$>3e7hBJtof3)xjo2KUUJy+nr!ku;ZUU(+1$={MS&FU42H*)q zaD|l?e}xxq{Q`hx3KBJ|+h@PzD0T0(3DSeM{@1<4<1voaLU_*X9Mzv_Njm)cc`4qi zAP8IXR+#_#`F@x~s+Nf83NQA1&^CW))b`tz?+4}Y2km0;jlTA!#@@6Jr#MU5!v9c6 z{o63XrX=`XaKpR)m*RwX{eQ>7UxD>k9dC}Ef7v1iA28%1M`^lHO1thl0m=-L8IH+# z;WRYxwKb%YL5k5F8$gi&q!v>V6P63zbUbAw#+on++gxDYF@7JE3I>EK-B(}%@f6Y6 z{L7^Po>O2HFr5?5sG@~{Ya*wR5-C$ns{)i=5P3XR-79TXnGYQ|RtYPH%VH8#3kj`& zR0;;66WzKR7_kc|`8gN*`%HB}ag2pPe6g$X=Xl2EW>DjtrwbTyq-isjn8VY8)K5FM z(HW82qId!jXL%3yyWN@jbB1TytlU5N^BZCj{3XX?|HMkOMQww!&II0MZzbDSiuk`@Z*Xr|j!NcYWqQ}6^)tqY>qP~@uEFd}Tv zHqnVy8&K>Gs$rh*clQ7Mrxw)u(Ygx2lqnPOhGZSWS^;v3j~s@C0^=eLxa2#w8pbDSKF^qD;X4 zSVKaDm696XUCo{|#aoH>a$c(*h1StbC)&_4q{Iui@fjm&=F{ctq3O-3b>6j_Dq}E` zsc-;T(OG8GYu z)eu@Q!jmVa*k2e21miqSAtgc+K4U_VkU0ZLcOlbzqad&qksOa3_a2M>9kWGB@4nd^ 
zrEKK36cZMUy`cHdED2!ij}<{{LltnVW8*TiLYetEvaBL{&{h@OhEm8G6WKl6?d76Nbh)W~7BlUcD_I>{xPR=ea`+w=59ABR4x4^h*DJXL>JchwV z|NZgh8T|F^Z~99xR8)x}+lgL8^UQREiaDW2ah!8G#T@c%Eptn|A(2yxae|Xwg(QZzIEx;l+nF12`aa%qawnOPl8 z%L6|v;NtA~^vzjkmS~7(N@x&jc2kU8S9(%;k2#PLMiEkS!2S%t=(So;;DpgJndBS; zf{ZCa80ShwgiIUfPLuJ9kB_I&{G--XrB%*JiYGWxEr^y8(1YQ?F|Cs5c70}Z%(=qc zx3g>4ZaahgC<#2yQ;oA3Nr#-~&wLaaYFVX7rHomT5T5k!Jo z9upSlsx`CjBk5I+FsMtb6aRWscPmfB{Qo$`X@+B}e?u4mXn~lVvi zc&ui1+s{B%xD8S9S^FqDCg4~bs-5>H_(+v>%Q*a|v<@1>%R~K>%!c`ROm3hZ6^$vA zPyIFko>+y=_o3{U1^kkuR0pF1$}$alr}Y7w)^&NQ`cP;H7~vQx&04S-4uUBS!efHd zM1V2_wT}Q>je-bxHd<(6oSqL=S*7SGb?Esjx+v(%wE3g%ruXgUL~=DVPN*D1`x_B` zBihipFiMoEX_~ziJ)0r6+t&R-0J6hEf&GloIL(u0Bst*| z@ayUb%-0q&+d}Xgj9wmL8b9~HAi&sgeFS?ut-@YFBb*B7F2cgtK@!mf@dSQQ#)&DS z*fN}vv(-=eLszrTuf8BiiM@2ypJp`z&!Th+OV4Qv}N+q&f$-4 zR11bBW6uI65Y9!8HwM5p%<;YUof^QYC?rO^K7lF{wZO=s@G<9UBO0rw$V%`xsThC_ zUcf9Ra;G{tHLUh_S`IVq+;=K#&G2lRB(~5;U#O8DHKWuQT3@0xN+nJ;*6^xPyZ!p# zp#NEopR*KiCmeX4{&&>vR`kD@hldA``rku5;gJ+RLUBhegp8AaXp5?I^=Bgu_ghzl zCO%5-f~B}M!xAM(qMkoD;hi&zcUpFbODGu{L0vU)%S;D;14?Luse~gndwHIkgB2(v z-*c8{+Doec#5WRCp?$&zG!sAO&SI!iaV{jAxtDt0679fg0n9NU*<$UTcBu#WvEBZu z)EfNQ&R8PayU@mSWwEMXJ`_)yh@ZxnI@V6Kcb9zF6B5Z>*Q6}5O>lOz*em{IiP%fo zME$m2T=P>eaRw%uWQ)J8?&&Humh~T+)^^Njmf;Rw4 z_#iRMehux~y?MPOP^IX#MrtWdZ8+c3w(WP<^e(gwTOl8IW@sabXQWsV6REoaayH(o zvknZ)`L=is8uy{$l|4(g;)|Ob>wpdXJ4*o!dI_eS3HlFz1|MyhTS|{Crrm^djiRhQt+MmR>9=7&hGWe$yd~ z8RH3|;lffMI<4)jZ)=7nC$U&Q3$5Tb^j;y=zV`b2wV&1c--yse3A_8Lf$RMLFOI6` zzh1q3_3}~wdx+=m`rmhkH&&^Eihb^(2C@`i;IV4g&HWWHwgBoXYO`W0@{vBVtW-=> z$GQ%Vr+5SD9<&54V_OBExY{N7mSwP2}Ex%$sTmN(E2=a}c-e{w=!))mZ%ct4Bpw=v225I>i}1`24xIzPv<} zkeZ}=_M~(!&y{jPFel2mxRJd;Z!UseeC(T}$!vybQk0FB6C;8s3H~sGGyG~#7@NbU z!H4#p>1P_;n0&Ulp-6kBi5B2?oK!qP_s#$H?E-R8TE=4%ey*xBWZ~yZ}w;PBIBN?kQfMb*p ziiH?(Ho~EZk#RO;@f9w~9n)*2ZR4bPqkt>HV|q;RJZNC1N}1!Ods8OlUV`U)wK^GR z@v2Jdd%aeW)2OND9Pz!BjEbZzwQ`(gXkt%xen%3*acm;#mUb1{9h}IzRyOR~6)!-* zbY3Uj;O*(h^W!&X!@=>%Sk8$6(H{)69 zWGo39bp4#aJw4OdZKtaY#~sQNe2!*yoKDaFa{TV~<;UUKp 
z-Jh>B?Ki+8JVocb36J2l)Y@p(@(tSf4Xob&^6r!kkHZy}N{8e&fU4Ar2Wy!*k!J~# zcqloNc(O3;#95jWI(er*IS!kln|Bns`~*E{?{~Z3v|CXWRg>bC95N4VdfF(YyPKXS zl}Et2mb(fKW|3h7l)ysaTx1bANeQM>%lWbey9{`*zC;bGNkz+$ItPYPYri@&B*(yN~%_ALMD2z9&vG zS>@cXYbLi8ns=7y4xlDR?ZI!6PZgGDk+kW60(n(Q9n=Z41UZ>isvX5XcTgnkInE8+ zt7aYY3sRc0>&RzcihgFJ=z2<092s>uN__I66+LZ|y~y=wPP6$@ONh9NQj@|+C-f34 zds>}h#UC)bnB>J-H;t<#D|%Nd}m1LkNRn=DZ42QADR z)j0a8Pj$5!2e{N&uCXHRj&q^vgY_#KYe-vFg{;u|b?U0f4V68-yB^Zzsjtd%L!Swc zGM1(h@!5vfN{m{{EZ3+`nia_)Yu1XkDjBvsTk3e($i+?<$Foc}KT+h-Siza=^K`lH zoQ1XlZ5u5@opoHeyk4tBV3Gf;RM%wkh%%N$*HcWZVl~Q3dBE0muwgzi&u=D+R#Zfb zMLPMlNh+oZzM9)sXC$ji%6~A5?4u8jA?u$fGC= z60@ynyq?Fc%0h08xy$9Yo^8t6->S^Q!vt{AH6n5|5w$LVTpe(;Mw&Hc&Fa*gc5aF2 z%2*=oeoB_7=_bswE_Gd*XDs8#HhrC8oWv%ubL%--ySm*X6JAMj#U@pqJ>VX>!#s&k zRk~JOtxl)s6+)j-FY?JR++yogiwtu!g;JANZU`X(%_We zO{`84Qv*Lf71O7?@HC3mf3?E{euO_hi*uerG=qJJ1Vqei>(M+-A-ab4H@{A$%s$2} z!Jpe;4-HqH_WJ1X-hBT#{KvP@KInGa@Y8=m!T`rprb$KUQ8JS}}$!&*VvQnbPP zc!Q-I^UJs9H;p=29S?Bdj+>(biav*D7w`KgXCIGGPTsydzub1u^_|y@O)3nkT61agLuOu4sy4|T zA}7QC$K%V(l5% zv;@{=WU52FDXGF0qUq|B4W#zP+2D2mH$P24AJ&^C9!sKKU2)&>p2-9j>#9_HaWJr?G2mB$M}Z~LtF|K?;H2SC@ve;suX zkE-WCU%Y%g|NS7(t^L1Si^A2{SXNwY{@op7Z|~&?)*U~f>(}1ldCS9?{^rm7yBu?} zu`ewrTR(kS64~zMy5m@X?lv69no61ZmrJ~Iuiu`3|HIph^N(+i&yT-X2FIY$+F0fO zYyZ>gLht(b(}QaD@2Ug6>;JCKO}y)Wg(DMD6je?*R043noT&ALz`xW&_tg|%WEk2P z=DO5qz}d~B?OH3R#hmlH-y2RexK?$k z5^AeXXlQEJ4rh2lm04k?{|L`z_!3JFW#r^`=P??}m+@}6nD+6s#=qIqp#Qr&ZMKs8 z^AGFw|NU3B{GTt5jvn)WKFCv%HE!(fC1ap-BNq!)k$!V;zCXeeIb*@yF}5O7g!pU7 z9x6hlE}9D%YROcV5JIM2Q@PcxQuWo){K*lWVEE$|UhKjj&7EGoZ)?3aiV~-Tk+rp9ah%NY7e!`;&co3tKCAgU}e=3t~P_K-c#4t!ZkFbJ~+6w=IyjnB&Q`a zz@H>>S@GNLT4Pyog2X&dc`s@Dc4<6cs+iP@I@6E&%jrHu~b{{|@($s`)?NcMAv5f3s0Zp4UWX z+vWJ5GIrJOfnppNI$SSM9LdlLd3&H=iad_ZHEtt3W*kTQt&6>`uOgK`9y-_%#gfeN zDMm?3C?4XNX_^0@+B2a<4jYf{73BMCJxZ%lgZufMZs}TT==^sR*Y> zV|{z0Bh6d-7evyW<9}Lz)nlfRf4xj0Uwf5%e3m^;`k%~+X5W_ zk`s=TQ_ivzl$kSk#M!6kXO|x@jz74Da8fTa{M+#3@#&lXxxMB59LsCQuiORV=4F3y zQhqV$pB5na{VpvVm3iEQ3-$7tF0?!OHKF;Wn`c>y^(B~Taa&45!64CE 
zROBAhe+blyLorF;ex}F5H z{I`PB%FQj90_*1g!K?kM|F_$Hy#M(@o>kd5-HNwD#klt(d*>VEp&)xGshHUZ`l0VkCv4I@F^GTsnGoOpfPsYVM>l_@Y&S-xv3u5oGitq8O-2Uq#M!~^^4F2 zqO4GT_o&+@XfJ`HRLq}+GoAx$alX;%%lfk!P6(2w^&sDsBVIlmVg7b};iIybbr~XF zISS>1ZiPfXuhk`)VV27tv=4ny?wGULcRGh&58CeK2npOb8(8gS(Ys~LeHu;Y2P=Hp zrLR4Bx}1DLBR{jzYoa&%pZ;!L;tld&rox(jTZ#c&YyZ8f-T!=W^l1M*$g@8FtB6{v z7P3rod#f;zsb4J?tkDLPsqhPoOhqSlp|vsQl8i~L48GrB^zsPP*vI{JuCPr7cZCI8})ipGq`Yr=i}0kg-LpU0S=$C#hrmxlBBta_UCKVdF^ z-`WYFb^70ngPQ)=-GB7|Kge_U``;VvI(QZQ_| z>AkCF2EzoxsFB)H<8y~Fmra=sCq&+n*HqOD8-OkN&?0d7fCi zaU$VaiYYux8N13eXs;5QZDvg_7%-;>+S0q4E5gR?JJ+N2y3sFfgbM;M@e;eMT5oC-i3!;A<) z=%fP!?F|PjwLwlX#Ef$+GNw1~RTYS*h)XbMZCz3IO-_k0A3J(JuSGBT<7>;X$LH>k zU;jH(oX*IEGLE<1$J+h>{TG$>|M1}G@%;aTJWpVNq{N&GkPOWD!}SzX(AUQ)(U76| zO6kv?))Tl?VnXDZPHzQbiqjM(DI1v#i-b;gf#VcOGRHb^NAMoer1b=-vWEzx@GRqa zOm1)j*F;X?Kc08sEln4|sII9%f(&y=3B{dO=XCgSC>h7CCvd`MGe+V4$q*94MXNI* za!>!)fZytj{=xV3f8C4eWKaFa{VC>juV~2Jz6oQJV$s^^i0iDi(;1vNLYIR70(Vi*C+0XxEs=t2x{J%ImK7Di6nI+rrW1au^sHXoP z96rW>KFsq3+@-c~qE`P_>wm${PJgB*IHGcAr`M!c_&P?NAXLBoRBW5+s|P<78~4Cy zy6Bk3KOjyCov5aLI|;*rK9x?i=mUupj3!ysv$CtQ35FDOb9?W zrV-c}^oDn@)!OeAC>X6Cq6kiu42>C;oTVw|d)C_mDVva3=QJHXy2*uU{zq#Q>_!{SAmp_alD9A491f&!&Gk8xog62ksk(NXhR@`;ruVd zFhCXL6iL808539()vU7$kbnz}k`J6nrOPkeht6i8Y0r;`>nMOLfXKgNJf319%|7a` z3$GAxGVI$hS{O49fAuk7n*rgve-U*5uCIDNMPfQ)h$ovhloQRe1CF(VFNnlKGu3es z&jCEAlFSrC%`zm(D8**Q44{5?s!u4ve801Q_@~{Xa-v=xc3%By7qE;w&lN)j^;G$n ze^SHy23=tgImhrR&UyN27e0*%O=eW8pA(KV^)I@<`lN3N`7}zeNb*V9Q82=oI^lp5 zB@2|4&@MnVRg_=|y0pndJ8JBLPSw1Oh=>JgYE&x^M#Shc$tOqz`Q0&pWNwUz9kuX;z)z9L{dClyS`A zU4IuSnv)5V8r|b86Of?^-Ze;JKv`C-6rU75X!%_f^W>A_qrPV<-HJq`POGK8`*ukZ z7{iY#p;tdW+ho2y*Ry?QEZ;HXGvn6!XH?s^_IM(pEQ=H`&+yiDz%}~c{?TFe{LkUh zWBm8SJca&O)|L>YWHOc4Sp8QUbwr;si!1i?7^a7<+AZ zWr0;jUX1PakxH=V0&@W|qLAV_))oDM3hBj~)-RNt9xXh)od95+FHUI}kl;i|X(t|z zLJ`xjU=&YnyBZm>pz)M5%5sq|+$934EhVU3Iims2@>G&6#r~=R-3Lz8(&NAB^``@u zQ``iB>e1I$ijtyx}M!HJdcA>ll-h52o-^%1+)h81Rb=9rq}KmrylS0g3j zDNb??xjmp#RMvHD{r_mSRxAH?#eV~(|BI^pKWeiDPFbAm94JbpZ?+uljZ!w+n<2G; 
z?P+`pBl!8h%c9_k@r%-X2&3Zu`5vmzYWe@_;PBt&&5v}JdeuWxVuwSNXVkrOOpPBN39sr^nZAU3IQvG|zXdlzKU z16AtOmeMT?pR<3&Sc(yC0f;HP{wVU11Medl7wMQLCJ9Zuu%&$;K@}U1QN?LRw7ZT7 z@JMo_6YtFk=W1E-S925Ad`owk7{d(5WW1I5r@V%PDjD`S0Z%XMGmRf78>%vPY z0z(%CvKc8Pnv`jvEXvOQ?XkN=lRKexX#uz=KE~a0zmc^rn+;I6o}xRB>yp}AB*Q&M zwFKPegIcY&4P;6;9F_6eY(VX;DQhr(*T3sPHq_h_-uDV2XaaY0R#;Md8(5C&F+q1?2is1ho4*Y=*q|hf)jhdlv&Oy@ z-Ig=S!NGk=u{pD>Y2_Zshc{>6iAiEXBGBl^f;QGrCfp$XLYy%Oy4X5%gw$%%E?;)K8}LV+3fp6mI{Mq-63P;6irTZz=2oZiwa+u zW5S-c&NU(JnlzmK+i*^uCR^v8Ix%j;K_Ly6CF^*{)56I;C$S*TBZE zL0`6xc$TFLw*l-N{JwNM@W{Jv9dIR0zCx~IH(^8kgl6$&2#^YN4$Gp_552^SJ1nc@C zg$_CcVgl-HxEf56-n%ct{(MwMuyGea=l1XF=WL{H)tM>BL5^$A|aEj#x zW76DhQV@8B*tfNSGbW>i@VkrozuJ}*0B}mUPNTDaWn1Rl2#xkIacTeqv443!I@Eva z{2DgtExPH!es=}_F0fc9KvLpAG{`&f-NLiop~Zit1z&3JUbqL=*~SD2)w%5u!S?DtcT5P^CE^n-LoiHYWC%W# zQN5gEz%RK#B_sz48-uJD4uYf zC`|^XUXy4>8H$n+6s;3MYXtScajQ8XAy`awY@Ssj9BDx6ejbJdIXeh zyHBx}u}MgZV^tn}1^90^0$l}69q0=v)hp5EX@6|AC96>Y5nxjT@4_l2%&tr2C`y`u zL|3>7T->t+)aMO}RIRJ>`nZE+kg$e$MO&xSI%i6Sogx|bp%go&Yz>qww2I>*<{`3SfU`BN!7(#1xCp)7$e! zp%gXhfU6k9gu>d!Pl>pCtr!6(=Pbb@ozos%+PQmGBo{O)PkI?87$pf#9(cIy-=|0-eU)A?Kz?M9MBqP`N5hrTXKigr zwqkoFt`#FDs;(k{zO1jbTDAA)i0`FjR0PDf?|^%Q2y1M5b2_|cF-nPCETu)MK$#vN zFbK2&R6QA^4;5)*q_(oPXgIdf?&`H)ne8fI>oOxIgjydo#~>r8GA%?%JXKAYfbF7I zr*#<$PN5YFVkM&I*AhgUFHs=M90zM-v~=$BUEdsJ3QrR0OFMFR*e%ZzZR1Zdd`beU zesZTZOE+Yw^$*PvNp+C_OivTUajzA@&W@XFJ3H1yBxNj&#i(Q6IffZp0OR@}C5lJN z5chOlLP;lR9q!MZ*nX}77-1nJJRUPHJq*@NRTe1EV;LMZlnj(5E*V^5oZYMUPGyl( z2noruV4QbNygeD~Lt;8dcC}x_TlVbN^3EX1^{RsDoNz|nTR5oGyx^ z8CC#jR;a>_ zcHo0G3!G-4s<@JV1a{R2X0HVC?oyOx7;(42sx>+wuF`3@CoHDEe?C>k(>6TnHK|Wa1@Mf+#?Lr%{H0@J~`5dKnmz6F8stlA0 zRn`*hAL1v;hQR=My zqxY1`Q*ir>_~F_`9OaS?Q97?I*T>E+0V4QVTbsQ@%bhlM4urB>!?t1lMlKVM7lUT$ z1r6B3=;~IVr&#JOw1kM5QHo=2E&c!hU)i*0iCRs|-pO|q-()zJI7t_cqot{B8Yuj31Lv?) 
z3}1I$Lao)BA9EwWrOi>HXz9h0GV`?zCBmlC(dW=nc&_`I8So&BhOk*Q1eUutTZyn) zG&-HmbAwFPOF@L1vIe6(z{s_4e3*dMYc*bFcnj~J7?pr>$^?Vel4u(?I73u70E z6d^A5cA3J*p;nSGM-HM%iiLw&iDm_|wpY#h_L;sox>a)1)eY%8(z*kGV|m#RJ3}KaC ztI0ZhU`5c3!qD5nZ|Mo+38Tg>TvAM|d@stvTMDi;_~KS}pB>_Li3?g8(HDJg)q zY3&&Px{}L6_&Q%41-r<*^qj7dU2f%opaet8f@BwThzP%0f`M!l>4N zQ8G(VY)m*~$h`NWSjarSGM*w7WJuC$k@l&N5lKDge!50Peu5OEM7SX|vX;?`LpcT6!aFiiD1{!bt}{X-V>_>VOTV(+E$|oG@*o1!6}rORaOEJ)&AcG}^W5 zy#t?!yW96Z*eC_5EG?D#l3|^SxWr?MZg=6arA|uKpm$JeBKW^NYE@^YtFwwVb>KR? zrURCoT2j=xQZa}7YWdK(6wd8llWJkBoEhZn+H2xc?do=mYQnB*$9VNwM46oGv@Ew` zD19Mb!B$JUuk+PVr&4oEvvtPTfCHi<%hXCeFX;_FgCNHifA7ro7H)u@xo$Gh!lH~N zJ3AFUB)|oSnAE^u(wWgPOpcryq=K2k;cAB{h>TFkGrb<7*yLr@6bb<^RLS*lub}!v zgFRw$S|U24$*pkNkeeO?uWgE?VzPNr)XbsP&t=+GzivS8+OA9qlC_~G=lZY?BJ=~1 z)7MDIAB@UjHI90yi9IQU2rah$RXzBjtgAGLz+z3PnhPeg*r%&}gkzaPqMHBYq+=oE zY>=w{?ejI7*aIP}n>^a1?yZ2$16_7@D3d!oo5JLF2k!{j)^x9p_Lyc@V{vvPj|Toe z|5C(=4(A0`sDp3hQE=`5GHtgYC`uBdzM#}(+BSy+jAM_{ped83?M5CMUolG)%yskv zT&ZcB3Lp|@WHQxz%M{002xOcmp>xX2K$PmH{8g$SW0B8bjFHSaR+gpNmtQCdKSgih zf4@8$5xM)nOXWY^%Wt@WVP>bq1=cy&o6Iq1a)d>yUg6MqwbdL)aZ?RU9y@KwPFaeJ z?4x*24C5**xg3??h;^-PiXs@iJXy4%0N>@(=XEm%FlZBS#3BsHnk=6BNJm%6DWZ_*i1c(3dfSzS>dNYE14yv;I3-?k+T4D< z^63G_uY^55P}xtbKtO28b9t;2{T2<)(85Yu<8Ae~5*TQ%btt24BSzv*EV9b^{Bo`H z+0S9KVXk-4%A1$PBsN7LEVE)36H7sbs#9Xz9?ob{*WFz+a7?@#1t(T7%cE6f$ z#6l4pD6}{>K{F`6QtK68z%?h*J0jfJoErC|b%eXgNouT_5kFj+8e**nq=-+jkPZDK zt%oHfv1gKOyMdT;0QASeaiz908&)_OH$}?0K&psUTXYlasi_@JF-oGFo0|@qO`aM; z>J@#)l45IETk%S~>F+`x)?pnh@3Bs*TrZoOj zEA4Hh3#rCs8lB%dCR{}`;aydz)m^~=Hf?P#%u;5got`|-i|@Qn*7qLj8r3a1=Xq9@ zi@mEZ+{Z_1q-$HK9~>1!0XbQ;w1>Z+*qKi-5f+ zz8e>PZ>K<2dDFfd`oNyzm{F@tdRm!0f;I+dfVnb8Uy~UrQTD{w3wDTY20|A;G{D>j z8~WNSh-)`}jILHV?@HvgJ9Tc`Ogyz>$$M9o2y*_nP-0n-G-YS#rWTgr&@zWW{jlsnx3dp>x`9SU5v5&@9oX-74K**dnj-Y!i7}uNn`4iT zQlOk(qqswh15vQ+;BUpVmdqN0T&Rgg*>%xZs!!GvS-6h9*jEF*TO7C9Klnzk zd(_vJIW<*l;>}(VlE4@1K8fI7dmU_nNcZM9ryNSq=I+-+*;l)~2Fu zQnhlzs2~Y0jv%@yNww-%Ef!-=SZO@YxpvW5mCOhH$Oeff?0vI3@>YlI9d(^c1%t?E 
zhOQ2Urz>a1QxH>@r-?hoGts)Z^%Wg;3*6pP%Mk)=c5X&Tp9jK5bd8D|K#EhCRnw06 z=%z#~6HaFS-1R2NGVEaVd8+}2VNT8XGx zirqTQQnw3UK4Gf8o@T>J6)R1uV(1hCx^{4yQZ|BP)_J#Jq$sC4m`qMFEk;Z)3X_zL zQ0ke%RWWyD+92W5w#t~`G=aajtLyTGnFW7um!zE$1x267)^;xYx8kW{*ko55Zw)iZ z4?rDYRs>T2x9V=&lBg{asV;XkFcRT*>I9bNG1SSvotWhSh-`4w@t_G(nz@in+$Q~6l;BO%PL7Yq@}@LH@>ywu(u#i$lLxnX^~_s zI7+%=;C*H%(J`AgHEiu+=t{bwRS4e#hw_2zDrtf=QVkg#J%aAywN)t{S7mElPLiM5 z=rs`yJ2Fo%O5{k1SiuAxcdzA0DVZFP^Hi;wzvN7s%cBX+a@o-v%DeF6uWgEExZP`; zBa!W0XiFrnN-tv0XuH?m**kz8^FQ^L8?Nu|@3u8zMY{*Tww;^5-D@Ar+MhrF^t7~* zzMSUZalgfgu1Ypm%9Ttf91F*F$tLMX&-69#`ub+8L81lb@CsflrSdzJs%4l%EIZ&u5HP~S$XI=Or2x3_=y z2R;(voF#bRbHPN4KT+b~_b+Q;HC!oX*};vZ8ohRfL%;Q0xgifV@gB8FOSLCm zsL-HvY5iwGw+f4V>tOorx9XSraJfr3{fL0a$RJZv`6 zQ%aKC+wWm=;Q>ov=xTW#Xqj+%s}Dc zVQyr3R8em|NM&qo0PI^$Z`(W)&e^|$=pM3rup&2Zy6`Pc3T&`#P&jQaMGurTvDi?g zKvGWA7w><6K~f)-WW`qOMhGBY1X1KX!;g7pNQ#uRRm=sOh9Ww-h_O)KBTneiA(QX> z{@HwPp1$ulpZvzdQ{{DW%Ikus^UIP&+#YBuhbB+M9B zSP{1zOK05y()GRGIfHCKaYsA{7I=SYAqW z$3$FOghR!YP4Y3YdPA-u-{2HpBI7a!**aKo^-*JXwO|nF!$$?g%5s zSQ1b;!U}8tpf~_(m;`Bq^6s+$1Oo-KV1UZ@OraQP;>q|V)UL;|OJjH)ViL^o3ZiN7 zxIq5{nsgK+nh99gL~IvaS6ot}mWo&~u|OZ$iZGZUWLp8|ReB|}5I&J9gu3l3!Lzi2 z)t_v&yk~5OmI4u?M5_NgnV*PM=0i#|foX|m@?^|~x~`;65sj@FN!Ti*SQO+%=z4Lgqe=o|)i4mY5b9b4NBY2Wv8 z4wXpPIpn!5=THE4ItSTs4zd(KtaIo|%e!&zEv8u?+gt1Mh0RH)<7?KkE~SoEHi(to z>ang?$ZPYukByZAT@$mJEep%hIt^^yFtBwYdknr_Pw-U>y4Ka~*yMXksugq%@j6QayFku^A%q2>Od5LJwb8-_Ej0O%5gJ`K``Tg4R?7oj!<6XN z`D)-TEIC4%y!l-lORw;Bn~JRs~x8bGx$iF(GEK%70LU%QVTjk$ozwj6IChJAfZ`=IMG zUEt{7dAP|%L52V6pl4N}9%PfErHFQL8DrX4^f{y(DfVR#h+-^S{3>ymnsX zd}<#Wx%i!&Dw~|@RvQWR^%ANX$E|GI-lW)zziOc7l>dQ2go5q>T^A4|dkx86A8>e2 z*s8v<8raOecUBw1UG{Q6wb-kMzmdHxe+A9$Rkz~EUxV^j-~G;f=<=Ur816h7YHxOq zlrb1(G~*uE01puUY=-LMk*;89lTuHzN;53lAYf#dLD}UXzg|9jwb_^3ET=h;DCH4) zAe5p^?#^@1i_YpHSDc zVQyr3R8em|NM&qo0PKDHZ`-)?Z-3TbF-N^clHN*wt<C5eY#$jL3{ zqIEn%T>4`a(I;OT+U<7xU~fLe1 zHwdYah(Bq6F|TUnzLS9Bm`NlFi$%`?2#`dS4gCN~NwH5@D^TNNKycWDY^>BK3yDZ5 z8hbH{a1V~M(FCu}XsXS^L=Py5(@(Xr98-)0&IryLrVdKTZ~9h4-+W5@3t 
z_-!Y|364V?2Ly|r<3YgUkVu^(;VkM(j3VLV81*R*9RR~{66FAJoG?Ko%wfPd$PwY8mmn_3aEJRN z#%_S)0PALfRNxRu4CkChSdMTiOrK;FoEtp8V?5Li60um7gTzC}1C*swiM*V!v!D}h zCr#e=JMFgfwMX?PGz~N5|1F{^7LT$3ERg^0gTvjb{NLT*+sOZQgs1Qt4^T=am=4#n zWyYZJou_a$A_9m21phfYeeVqzkC2o&)G|yZc#Q*!IEGuqi4uJRB!fPN1PLRPB!e+a zIY=BO6iF<6#~D)AM>Jc2B*p3%z*CST3>*)MkbDexBg`=%N`XXZhy~mcN;T<}QdI)Z zBAri+g~U--uZt{u4_wz#GzFGQcnVWtAOlq#xI?kj>_po4j2O-ePGTXEW}wQhNgyrE zMS=nh9LW(@;uytQ2{my|LN!#8JzD~Ft`E1Ea}r`*C}*;SsCs8BMh^yv3hX#$F{p`o zl>VPN|BLQvoqf1^bLq=Za#bFS=6|VGwKV|HhaeirNb5ztgf z%>U(_YyAv39;o5ZbYz+c$uWu`WI-C?*l3noreG9|GIdZt2P__tVaiPkrnMa5f9>Y@ z;X;G?e?7u9!Cd$JdFc5*wsHt zbeRqY{6vwJ6e(8V8?e( z29WJg4UvGw5|KDx`1Ly?3l&B0$BP_$(pS+r-CWM+Jm_NJns z^M7>Nd#{>z=%kEAD;OgFg=r#_H!o-D<&uRxPBr5okEvssd;oXigl z^U89tJyqd}bn|y+(TeRFTJt}r>SjOOMg(Te@sX5?=(SoyB1dW84_MSvqu%@>LZn!< z_V#!8pX%QMim!3X#9*PoAnIl8#I zK07*nM9L%#u$Ax8rgBe(c9X-G|Gsy9nya^W5_sbo8eL;MMR zXcrM8?He&>#dhE`h=daPUX{W1CuDN%&`#YgVlz`h@0I44^-vzmQ9Q&Jze=W@UjFRY z^kUvplVWU#B#_`98;O)XnSr}ak&qL9J3HW-0=WebH=9_lDR1@!?lDSGpHL!+POakX zS3A{FWro!|Y%B@771e@wZ;H~YVJ zglwWK<#sz}a)yA}DMwC=gPlT14G^K=9!XF#R04}}ZuN;$A(i;cC5U9Xb;;UoLP>#D zL`z3}ypIAOrE}5`{%CXdEVN)0*6Gd zp(hlZO-Ib@h+Bg1*3c1(&9~nX9&8P<+{%9|O{aLcLoyg`eLtGZ%=I57iN`StBkV`v zm+@HS|I^*?RL_6+JBOR~e;wf|T$m9s?X%-tSqGVBp%!{QXB;BNF?_ULO5pd*3FRVl zLV=T5;-NX;5r1swo9(3VNYQRXED$Z0V3jN$wuqIb^%Pm;z04KG%Y9sSV4L{Z&r7@G ztMW3DYa0Rr6f12*B0j{@*y>OW_!AMjmLX0k8>=0?+M|_fA{hv!eQ5rA(-t#H1|%?N zRS^zGC?+BjAV)}Q!?DtiVkF5erehFkk}xiD=sV7F$~lfD9lx;8g|iewD?S58DZ8^; zzUO#wN&?OV8%VfPde#f*BVi8VK2Ef+Nvm2W3=(3bL=v)GlXe^rE>ACQDwkTptECWO zAbj0CU@@iGsCqKa{F!W!lGVjy|D*x zbgHV{(<$UQ!9D0RMlp)@o1sz$d*HsqG3JQ673NdEmkk(So?bR(_O<)$F~iLHKjq%K zx<~k;`M=-oPT7ABHuHZi;j#R`M@Ix-zhD|= z>Xyyt?qp;WqZJX( z8ONbl2m`NlH$ApNAj&Vn_l65P|y6yHUah&YvTctDFOJ?pnIaftuT=w8)`{hqnN8=F_QVA{n zubdLpCM?!&4Lv0#z2ebJIklrj5AGkdQ?yo-uFanLx&?&ZEV^RDJt|hDW zbMEJ$9|XR|oXD|xkVvM&{7j<=ar1gW;vweRhekj1H10EYbDeVP?l@2_O-~nqvLVH{ zn3l^1+ppjJ{OddT3@2xAKfq`B{pjNC#mUvl@zHxlIVaN7$yJ+Y 
zqdGxy)PojFq%Ac^m_}wt+cGoi&#q~|0U6FsZT|B~OdRJaT;d>AZ34$EmiUuYb2L#R z8w*Edaaok9Yktb2C+>ivp_-I9({(GvY~OOVerp0ejgQ2yLT&nuog!7gPhK0(C&^R- zb8usvzO-UcWa`1e{{HUXgp#)3=A;r%R8N{-s?tQ1s@eru2<_xAMNC*3k{+{}Su}$~OWQ z>HppRc4htF?QHb_^#n^T9w7mJjN^&VzYoVF9NYkr0*oAn{#ebon8ZURW^U3x4kQK7 zn_P&gI;xqs4-YWJQq5Qa9tC%{tJ|v@Pq%;8p8H!0E6x8UZv-ru|9jot%Kg9YUi)Bk z|7R`X{=W4bZb{VB_u`aBuT;oak!qG;gnOW!)Jk4zW?j0@ENa*J{k&Q~HKXO{+x%MX zzQ!TC<*q~^_ZTU8k7$>?-$@*@JCR>rGH&@O3=4l!wfq}9fwoY9T5ZJ(Mn) z){?6h{X52QC_|xufTdI=1rdyJn9}?X0TbA!=uEe!j`dWmXHj=E|GFEjc0aZaMeT(2 zR;`!iRWI;hfCJeB_l#X;1yHZe!J5(++&6!vh`QCLsVzqC&|K5Szo*3i~N50`}INrwldp%(#{lD;+Yjzm0 z(EeY=|LE@R9&YUaYYF+)8xpEDQeWC|wK>!U_vGyD2N>9O{Cr&7m&1xjD47rpHXe z_6UhOFY&_gYFJgh#ZO{W0?LM<)44SZS>~l)L%lFYYySVnqw5JJHN?nXGSr2s^zUDtGFThGC|$ydxaLrJ~LJmy%eNT@}q z@TAXmVB_{? z{qaISq{)z@5Kkxe?l2jSq}u~`*DXiq2S#3^&y(C9++dtIc>_&O2YT??(^)_Ma&>hM zYB9z95B=sBeZr%b+UfZ)VnXJ=&F75IOqVcTr2AO$&M{onAkwDvxl=PEaXdL!QvCAQ zmfny*X7>bd@c4xm8%jRSpVviZoeLfwG!LD(p3O&dM4+8Nv&y_xd#G7$Q9a_y$-HMn zll#3n7^=@*2*EttRkom-yyyD(s6MNG#ALGPXGtc9J}&UzVF~U){Va}}lu|j-&L#~| z=vOPP|C3EXBL!f=`oF(_SoQzeJLqigf2}3hG8rX_nA{n8U3Bt-D?(46Rmm(8 z9Au~XlmtkWCX;>WZ`kB!E+fMuB!iK;!k^zHS$$hLEC2lc+T(_0=!8VXg6Fwpb|^jx5p=Go7wYU?q!pSY6csPN2 zv}0$viA<+Nl(M_?oY)%?Ae!xGr_%=3F;1^CjY7_n>N^jP-oGz3K}pVtw(I+E4~nk_JdX+rBJF z0}T|?DI`yXJ{kYieam6>k;pAlFG?FJ54MW!(su-Uun9$2N&YX11hvTj_n=d?|LyK> z{QuSymIRV%fFQGI0a&L=a6W{LS?_8Y)T z|2~OB5)Ye*frayb@1Sb`KkRP&|JM>$nEyYUp8uZ20HgQ<4^%;OF)*jL4uB=>mam&g z`+wpev-^7)p70_WuxS48PObm@8~pz@g<7AC%4q#Q zj{m-POMai>OXh#mEx@Arf7q>l|6{Lxu<`#}OPDwRzs|&W8ch5;!;rc9Ub4lSQK}*g z2kW@f%dWfEq_;huAakUfIz1hLrV< z{|t$TFMwmJfe=AEt4-dcICP#ujEBZ?bvxmBKtAQy@c*^r!-trTH6}x1iUbMP_wlgr z_^&UoFD2vHc?#JjkKc|jAtYQl{*cI){%eZw`2GL#mj0W+7!6zMPyQ%ww9|bpQ zqVGNm=eaNL66d+!M>o!MUq*@Z{C}LM@EhWUr2|GJiN|I7oSKS(S`IKtNfql^o91^0U8 z+2U9*dW*v?=)qR^_3>f%_0|g$6>uU+fT)godE7bb9OY3O!3Ebu+#w#5c&KCEzWwQ_ z{c|3Z#y2s$i*?Mg4qFbugX2AWa2!(@_y4P<-0gvTY7k0~5f9xLSzHFBp`z6i+-tQ& zFv1b?86URrCy8UBkJq#Gy1K`zwkpT{GkwWbV;sDCW!3uGgFf%K+Q!c#;o$-9pP94i 
z>0}=xF}&7iz&V`FA9;p2Bm;Q%ke4KfwV7Xx?ThST$EIcBF9&G`Q}zbXBnVC8Zx@% zMQVLN^4Y_E$vws34E4a(p4}E19jP6Ls$An7Xd+Mc@WO89GSgi9n$7qm)`y9WSQpfq zvd;(9Z1$Hqf>IOy1-YjDE%2t}laW>nW;L^Z0c8MGx1mFnS7lA~+0 z-7u9OFb+3(3?!bJ>cxzr-QR<`i_oZaHqnnaSc{BuL2(?a(mZ>xT4myfRaX(~y2_&3 zl#@PYl`P85>-C3C-;EHz3E5rj78q3k4}Ult!_OCmMf#tC(p??<^~p;6KYb*$(f`&E zzH$6lGyKXo2fHQg*S6pFcNbv48m#-~xUbUy_!;VJ_Fzm+DA89+6we6;Y{zf=o$p8_ zHJtw|W4}IWaQ}O6uj>ET>9jZYpS6Tq{MQUW{ky|{%|3591NYTlnfJcMX+0|Fu4p~TxTPSo$1S3svrpqkF2#umo}OB zmKNUQb!euRhj*vlZaY=q@=PDJy}5O|Cdjxyy*xZ{%!LJGV8lp(3z*PU zVD6i_hp1pc;()8gQG+tY0nWgggoaCHG2r-?a26X>9&JvgM9r&g)MZ6^0U`}Xiv1~x z$xYhFKAa5H?1YfP9g0gev9hIc_U+s9=V}b3E{RnV6#khC9UmzMF%HxK;$v<01&$+j zn}HdmoQ|Q-k!~CPO_{ApQ*Zeh(H!^qX$2wq=`={PuHpnxd2x=v3rh973=P-+m9bx+ zG{Jw}t*-yw-Ocx3))UH#wp!|Ab~izQ&9&tWIPqW3`EuWOs{NKzukOp+H9Gg#Zkrz= zEIt31#D0C!1poD5%Ky8&xAFg8OV|LrZh&3CZD7~6Lsn+H-@gs6a#hG$t6Yx^S6f%O z9|x>9YuDv~YQF@h)<%I{8Kw3g3{txQ#Wm|M8^Lw0IIJaL7E_j7vc4~T6u*6l)uOoz zq=W?xUae$P=-iUr@>fB!wTryj6+CKKTK}KeLM@oX`3D1w?Ejs;!|MHy!^4gL*II&- z0NT(vAQWp$f09tOyswBg`bbct(>hogW^`)Jel&>DI?glTMW+if`vyy8(fY<=Mb&gJ za78Wf-EGx`#~dK==$JlA#3+4{&wroPTj>QV>CujMI4X z44}5ZEWRe8{nGdS>}hh&Qbgy#Ra+9H$P-b9zF>0ZJbSREd2Q8DU4%+hZSg#zeF#(w zPINE_XSI$)QMo*!b*j{ygZ1-ki@8mh5td&6m&AU3(g6Q;?{Mn-Z-<-vU+W2vfd4vc z8C-UOU1iyPG|bmJuG71^DS0;^v2_|5yL1R{fyY9)ga8xX}NiIcv*+(x<=%_ z$HDGfFTmcTK!)_4{@GJ>2YmQW$LS|zP*5Gy4WN%i9RpELYlu67i zP#H<|$HD&1yl(6;3;UOnTO4B{&N=JjGC#%V9hOyW((FUbUQ1=I#%cDI0&F! 
z=~79z)V!(Woi%|FA5IN~tXsX1@K>!nrJr~;$krZscet;2`Vp2R7QQ;K!aG|@%#{}} zU?G0h)!uWp$x=>VWf$i<-9!CV_)c&3^>{t>GiS(Uhxp8j%^Twj5ZtFHL}MWLdCQXt z*|0igA?`u@@bK^-v!Pvj|Fyswv?AIqv?*C1F|Easbx4Hkbmhkxauk#jwb(jPn6Z3UmUA|Ae*J@a4{x5<3 z`lJc|Yj^7VZ|%d4|NmOT--iGC)y2RgW4_L=&&qhO_505C_W#oJe@X1uCr$4E98Tds zA8h=8))T%Q|MhDg|9$P&`~JhK^Zzlw|I=<)@n3hlyPNfYE#V9BU%!QO|22TVTE{pG zL0@5Vid=Z*yJy+ea(7N40qiU2HRa1AXB=RP+7tT*J^&jr`eTQN@_%LQ-zQD%Kf6`= z-)Zk}zW=?Ju)+Ubi2v&|PKN(f7~qEUKga$xTA-@vBiesEQ}}P){oT#?Kh_c+3zbM> jiiaFUdT4wP1Ah}XVG}mt2M_-n00960o8R|F0NMZmChM*j literal 0 HcmV?d00001 diff --git a/assets/rancher-backup-crd/rancher-backup-crd-1.0.400.tgz b/assets/rancher-backup-crd/rancher-backup-crd-1.0.400.tgz new file mode 100755 index 0000000000000000000000000000000000000000..117b450cfe5c01865f0ff29e20a980c0e4a0d4ff GIT binary patch literal 1688 zcmV;J250#niwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PI>{Z`(Ey&$B-Tkv()9U{!XUWH3;ynbQp&x~{OZz6=9OQ)inC zMRFu%#2EJ72a2*SOVl4bX_BqJJOm!D z6qP1zdds!JjA>*RfJWnMjb=g>V1rKOWM(B{h_IB5lJwX*%bm%>u=CU+-HSZ-Zy=To z8_+Ma)41H=ss>C9%7T zy6ET0ukSx#vlya48*CCCXd@J!!#k1L9zyHccAA`#YNIHhS0K67-K-?!KsKK(0A`ed z<;t(Y8jb1xkU}@(NFbZTnSgn}3pU>NG(xQpKOhRyQ@EiqXoF*!1H78&_ZDFT6+0t> zJE`d3zyJDmV8_3DbvgzdtxmuBBZxt3JkKw6lJfahIaeZ)L*!Ed$DB$UA*WoqgOe;# zm~E7(gODb|LXTLYP3nVmGTq=h6u15%`um>_LQBkOlpcob6cYH=hqoWkCS(CIc8v&0 zC8YImYm5{p)6O;d##jPQ)GBo=Bf)2MJ^`Yf9CybdwBv#703>Fq19B{g2yQUtl+V-F zEwJsG(Rl%`aLQXMs8j1@fbiXh)5nz4IEja-=f$%Z zz&sW0mNZ#Fl(71pjFjM;kLa$5sj6QreS zLT>%a)wt6&5e1Re63nU%=3Zbf$UGDgcq9caYwz9wqpO1<2?+zWpFx1z1;UNds$H zSgl!&M{X?`YqYZVipiVp(0MM;Zl&kdwN`Twg1>=rF%?O z;h!sgv(GPYBSr=!&FCIkC{|%TM^zCcHbg89nOjw0lC5G4bGVaxt)Ag2Fz?y5T2Ot+ z8)HKjZ|&hRHi+sO(KQdO`xz5mDyNppe1HW-C`T|yT?o|^88&q zpWf__@P^8*=c=D2Rfcy~-6}Sper#jHrFww5ZFryl##mpRHqSm_ zCwca#-psy!#Q*lNz5i=nPLa7=c0tcBwP}j~jZkpk0$@Y@f8IYI6zhNe%kUWgKL&l) z_}?ne!xjLYnod%@)BxOEu5#4PwKrDY9MX^FDqe=26KD=LaQRzNao6LQWL@XJk8qmJTbgbNbIpoRz#&52!e|Zzc z(L>GA^!x8#|FZb|@80F$nEyQneTBwO1G2majmFoV&ef)sEU+@jEU>W=K;f6uiG~PG 
iSLoBO7JUkirh23!9qCBVo&F5~0RR8V^3E;*HUI!o7BQs& literal 0 HcmV?d00001 diff --git a/assets/rancher-backup/rancher-backup-1.0.400.tgz b/assets/rancher-backup/rancher-backup-1.0.400.tgz new file mode 100755 index 0000000000000000000000000000000000000000..0de3d75ce06b54ea6762abed68c9539697e4f792 GIT binary patch literal 5742 zcmV-!7Ln;6iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH<$Z`(SS_v`vs94MXLWHyqW*i8rCodw)9b2qrXZ4h@_EIPL> zEgjpO$f8P8PJQeC_XQu4sfX>n>dp*86sRnb=fTH$@sQyp38%>Y5ecudw0$}yTm}m= zi+_5!(&=-2h0PmWJcp8eE0>UEwy`w2P^NW}&wJo@vD$Z*=& zG(;}xm||44YpfPgk;Y`OXG3C=K) zjkPlBhisOz1QS^UOH%q4xu7iR!~D2K(zN(_6m)`KD?$-+no9lZ!mn? zkflJ9NHH!MN03u2#)712OhY|OfM-cUlSvDJB|wq|{F053Cn!-s7-vEvZ_!X;no609 ze!CqinIZy9wVqQ#pcL(xO_*rgsbuj6Vm@hI;o_R{Nc3C2i(9mOpvjmMA$bBd#%>q9=yOY$zlnj3!^N&LJ4EWK{chx1V|i%fd^w6 zD`CNrNQ_{#(6VM7F40xgVM0c+QY2#%3v2=G?I{2eCX$kPh#^P$ip&t`7zD~g5L1@L z5sZu=Q(^+xSU;P}5VAxPszezK-VGgKA~4|JFvtYL`QTT$WHBB>#FGDz3OdLs6)>h4 zM@m{ZkkJRvTl2~4E&ZM2M9V4t!}OWcdd%Vox&47jlrpNMf9dDxz`me~z%b+Z=H+mB zMU@7i<3-+GYIHb`jY#gd+gF9i*7}!)qMZrk{v@Ljwo@fOg~UYqa~5Ya6m13Id!RzWSe{g&rsdp6@j^^1MU1p3-m%5fuuhfM}7B84W=j0!o1x zHD{n@iKPJc)g+3{Tt5Ic!wsS&i{nE`SPdD76holGzydQr!zp7zO+E+O3N!KmV1JSx zD2)>l2d8lMHr;F=`5Cv1;4cBoqJ0;N=sE;u_>Kt^nihn5LtkK{ll z;S-cVASGPVFpCL?i?@eh&9>4Kw{;C03YK{))|P?(I*AvcY*a!M7&^2l<0y{VwX%ii z?~2lkx8lDxcIhZ)llF1v=uhp=v-Z((J8$XNd)3d>xcjU2ZPl~%ckILQ&$QKQO=318 zv9-o!tfe!6v1_f2y~GI>Qi*ECO~N7^VvM0=yboW$wOW$JYDH?R9^U`fYLW4nCR8r; zXIK7L^uNSe8WTm-$0^2&_&}!jH3hcl|L(J+XBGW_(tGs(c2mB7^{s3L&rGq8RvLZe zc9MDEfBV)_73mm)x5h#=lQefx8YgGbEafzjWAOeU+TNW2U@Wls;jQSwB&t2PzJ6{0 zvGtB75xXut5~G+%6cwUYEqY_1Xpa!o8WM-lQgueC7QcD#tRy9ct{K0I8HoghxqTtR zDMne0k(N?ZCeQ`h`eWM?nD?^&=&O&u1Kcp>!xgGU?fR@ryKA$&Z`*49KOSSKjDc5d zXz{Dpxgzd^clJ3Wu~+UI(>JHNe$M-1oW=11RDJEOE0;@))1dVZLGmmKb$tmZT~N-P zfDwj78ApQ6&~*T{5Y~o~0K!#A%&5$Jh6>oX#@>rpLseHRffU1opx#YJAYEFoP?4yB zuf{$wp(?FlmxfA3pL(RTVKyGqFIu5E)1#Ojwgf2StQJeIscal$B}WzkmJO&}0dpGbXXp z33VtYq!Rg~g#mPFLNmb-`J9H;_NEhaEqcRltn%tN5@KQ$>w9c9w}WO*bo5Od$Sxmx 
z8h!iLYKT;|S8|~2R+Ts|t%a(WBy2B{FjU67TEUsX?H!_94NRj~&Vs!m^A-Bm;_0p& zsv9jgPjtP9SY)QNlw+eiOPg|FgMK!AW=vg3ve^jvR*8|$!}Ux=US+t8Uza(W3YE9k z|7-0(t2sG~aYU1dCX+iyg|^s#os;fy+5S83JnMEI?Y~`=E1E=oI5mTP$zptN2D=nh z<|7gYB$HFd>F?TJ39kNRMD#T3;5}?PBstG^5IYCn^1z zvn<`vwL1!0uU-EUrZHR0FuDJB;Fk5@>pZRQ|2;cCdiuEjcTucmrc_t%nTc*-qj1G5 zCrVd*vzlj2Xbj%plD{RVdM^<|mW=5n;|L=rry#l-_{G~&G}wm!s#?|+IwVbnk}qae zy(p&R{RGZUmh=ONr4&QeIyVpvB)wDm(gD2@c-@d*AvrN*=Bw3$clt$@xPxc+2t zsZ!Z2HYJ>5cqOt~TXah!v9B!|coMv^R0hw9Kph44+M??VZNX{b;vHy=VoHvmo``I= z!svic=i$xar}Jx)OQ~Kal_ftFvi6JPR`!>$ra>0uF2-j+=vWj27)$LAWLT7!a0zjx< zZCf^lpR=~>!M-g;vx}N#kJ!!R$f4++rhK$qATB~xQL_Idvv{41tIdTP>^zq4$i+Om zejhw;%)UuGtcx3#;IF6J4P@T6Lw>Ze@>m|cthN7h4f+5$w&njFcdGXP)03me{hytb z2K&EB)A<$Sys*nJn)Ji1O!TT|*sX<5!Vrb{l|{Km^x+ao^o~=BuagjUmWBis@`)&a@V_XC5eDY0R-Wwxe@roqM4 z98#}5DhgNo=mxgCudbjv-){u-QLp^yvR3}*ADpDuyXu2&@!zK>75RUB)HyzWl>fUZ zr7&;9PEsN0FYU)6Hbr2nv&9!ETgn&aERK=u)EOlowLtX2#p>BWk7os^0ni6;6@j#F zjX~vET&s6^N}bIGH?Ff+Y>ef8faq(P!$~qhc#;x1J%lG8q5o$H{z79E!Ot-pfv2+* z+t%@Rr@ycGQPi~mXkrUmD;j<9BnN@gfOhoFaVO zsOpdFe+OlKNM^0cX96j=vhf@ z7-60_8F|elJ0nnvRODS_QkTuUJdIhg@4hcJbkmzl+zu+FuPOo+t9oO`i>-yc2iPnn zl8$1`ja(q2lu&CE&hxJ^GY3gLaIuv5GD+9wU7mog_$uB*m9bT3_C~hyYTuKo0!N9Z zRj#-t-%TD@ywxf+j(B5+xv8=y`CP$nZsj-!4cH0?;n6?%?q$9HH~Snm|L0ytz&888 zdwf*Q|LF7{?f;#WH5$H@Lvbrjf4^*s`|JB3-J$4#uBj8IxNCbQU+Qg5*dRD1sW~`JQ3QjFb1OWBQ1P8X>zg5U7RgpY%G3QZ(re8Lg@$nFz;1?>SP#tbzmMGfC&qWo@ zT8WaC*?O5jonF2$Is7v!1WhIZT$n6ZD-TR=JmVZi%5=)0YGSkHM>uA@sA`V6QDHs? 
zUVemQBYVz|AH7EYl}knbU!DzK{CXD5qKCw>MgE`kx+j(RPq){7-2dB2*{fW*(Q3UZ zR%#w2xbIyN>u-t(S}k*`^1YG|zwPITD<@P=vr!PTS$oS_^aJPMx^}1OERFR=8is@j zC84nhS}k9dR%>r>4}L}omzXj*PsXhE>C>kX5mS8-MnlsF#qxz=dSdNe7EY@yRrZns zS3-7f<)zO3DkIB9Y5i7}GWvEy1wUDL_X+sEIfCsM7WXD}8&PdQQ)Beh1~PQVx86(< z#w?E6wHE*SKya+pk5L@9u6mskPG%^Pi~RISg$OeSYR1VaQeH$t%xx9@R%;0tISc?; znmyMSt7}ipizO6y6f9dyzbMXQ>G!g=gj#;x5)`8*$IpC)($_8X6;?ncnHW;5@YDzW zW!?#nf{xF_Bi+mi&Mu4I#!ApA@0)6PD1g(9bLA%G=iEe=ri@FW0I0tGl_uw}4MoQI zu{awenoD@K@lJ(ufHD9rRj2@x0MkQ4VslV1fZ-KQohuc9Cct=v+@3iqZeRdqjVgdS zLvL1?byU-$cc&vYnDB46TikmHB6i5+i@O}1V*T( zV6iBJDqWv=3gDO}j)L9-i3$;?+Mmm@9 zTv>be*EN+>nz*g@^I@#vE25A~ILjXub(WWWVp`?Gb@O`b(}5-7qz9OUEK)Y6{9vS7fZ!Bk_m_0@PTb?!+j1t67L zbCxb4FbB*eD_y5?2#iBSIfgP`6zvtFi??>Um}OBXhzgT9iV+YApQ?$YpTG`pA*fOo zt?h(p1VlY3KlltQ*0z zSvqiOQh6}amo(&Srw)IAU@RXcgs-QV*u#NMmtZ(kvy`a1yxD~v=D;^iCO*z-sHRh! z2tg-_pR!12h_?6i;qw*TuirE_HwpCa$n@EHpCpl!hzd)1|C`29q{luWH#tm=lq8b+ z4i?Q@$q>1GA?|Hh?u9%O2Ovl6WB%x!}KE4%^ipawFRh{$Kbi#>o*XsspraxBguPyfG zrlvQvA2neKEm_JH%91tK>sGqfG~C9NrtU|3XWze6{C|?FNl2m@K41)Bi~ry4oph`B zzdb#A-2d50+35cpYp3M?ha{EyY@bz~Rvk8Z|89g}%-Kv;6h+J2FxH5yd8`$_WH6%> zu56whs&35^Y=|JJ>I5P>)(K{^ImYl8#$jdxk|hZ~W!Je`pqLmVQM?^msVy0>jKh1S zuU5i4cm0+0)|xCA;q&9*OB<8ipi;k|whw|*hvwSBpex*z;%-mn%lhzsjW)Pbd_?El zy>Sh@DM=zVs;Y&;o~8P7KBwmjVW`J&rRVbTFea(_YOdK3#RzK?H!eC~*|`&<>|0^S ze+7KBfX^B7MQ#u}6bvrT^|?Ichmex+N~6f5&MOG6c`OMZsTyM%o2wWEnWF$IbIZ0k zm>R81p$O`oWoM?-R+<~$R!e{;Fzot{3(oYJbW=-!$ik^|2L|tkB~SZMKhnfGVwIoG z3gCsw2U(>+9_ky`6g6S>`kpEmhcL>NO@dro{X`$`v$nmS`D$X%k#id1MkmE2M z%DK@}PxVY#zJ&5a9DJc095btlibCKSK@0Dl6>zp{t5zNgxmx}W_OvA8O0@O;0#si$ zU>9K9(%?r#5+#m&E9k+8K~BD!C3^Q}|C9On>A;X^?ko|;pV61_T@~|Qd24El73;z2 z4}Ec;0^sz36|^h=Jjm~O&KH@2o&bqp%0hjyqIc(Nn zey(#wi)mfx0Jb%vEm`odWfWHIzZ)b!#&V{k2JcSmWjWx6@$rt!c1`X9%5%R>R%^8cUp zD*oSbw{!C7|LvqyW@F==D|RgEQ*BjAPYZvUPf*|du1)tw1ZFSBKrfJO2W^uDo%21A z-?DP{MVXnxJxrh9t*I@})jbK$m6@-YJWrha7tHP=y!WOU&%EC%aLhVb2}^t5U?b#a zu##u!{dU71aONnNr=kh~Mr0tA?o5<8U4HGK=&PET3d^^_l zlHO4B-SlQb$!f_NSsdBpslp6LVs{ewj+>`t~@!>%y(~$4(^G 
zRt<8 literal 0 HcmV?d00001 diff --git a/assets/rancher-cis-benchmark/rancher-cis-benchmark-1.0.400.tgz b/assets/rancher-cis-benchmark/rancher-cis-benchmark-1.0.400.tgz new file mode 100755 index 0000000000000000000000000000000000000000..7686247acc62eba6dfd34958570094974ca6fbb4 GIT binary patch literal 5079 zcmV;|6DaH-iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PJ1;a@#nP&%dXq=u-A7$&^KXT285Mugc^2wtO3BQg(K?rm|BG zM7AWXNPq!AYjhsJ%RS3I*%bhiqA1Gxh($Xy>`KKH0W^RHe%%d#Yzo5M35teGWn+lU zodiTon`aXulrtxR|Kbs-)oQi+y`K5oYPIrz+ns*vMZ4GP^^W_Ue*fe}t9{(<_Fh2i zF)H6gkyJ#yXnk{C`ObYOL>S|WD9V`})Bs$fh|kB4OO*1_p}a{V#T%4KMSOn%h0~?s z3h&PWQSPLWgNs7Z5k)U8UYO0JGW5y3!AO7uIJ>xpACn3S^Xq&vBrXm>w}yu(h~lCl z=Tc!%ScJ;~_>@Jz7N&-R-%<}{V|*(N&qbQ!`jJ#9q)V7=-nI+N3-pmlj5|;$slsv* zZMZxLIYXw_l_MejI|@lT8^CS5M#3=j(`h-qnupRAG*sr^rwJ|z1(1=Uj}k=07-eH4 z-jq8?bR$9498CX!GaxXHhA0>+l)BNj`!(un?h~a#IcPRrttF%rx7bV;DN)I$&&OOg zbptdP+YLwFj%!n#&$#g9pw>t$tvw{K+oYGKldV?msqylAqhkGkOZ*7sV;O)A`oGia z_wxGx_~f|K|7Flic!whr`3mfq!-$JwEIPH9@M%INPzeNnKD~V37;zC0rO<;B^-*Vd zhptZq!YvWhY8=I3h!ASc-~nZd!<Ou7=il87?iS<8(C_&iF9m^Yt78Dg+nkHIl69#S*)A z-|5zB0ws@xi*f*W_cef*@DGGYM#T5$AkgJOfXoBNm6n$b(WRp~c=5=jrGYR{XYi5S zA}Bo=L=T3`nRF?Sq+#t&ICaqsq?+hzP&jtvh%Y%%<_c}6B)Aj8T>DojV;i*UM|MjE zX8|%LVMf&ie5y^A;hi?+2xR0=boG~%QLW}3xESfN1&_mwu%(`4ibc%!R++|)xbO2B zWn;tLr%Y!d-oGMg(gR%qLl>naVt!=$FEHSy_Xutz{5cE-F|rvc6!2;x>Q_$8G@!~j zH!lIe4Wj|HTdg4e9bjOFwcUApN#ibljZkh|qQzq*U!&5!xBO| zEvHkf5g&!^p7jd60gQ+*(SYX`g=y)G2xf;RKqaUvKXRc4@V51~vcbP6Dz^Wrc5dEG z&<{~aM}>R40vp$V?M}}ApPY26^xEKUvJQ=VZQyhU=gKaKoDV7A-%^`aFIcnL!n&w7hWdJ`BAe+Z z=5QloCAla?N8tS3dahQ<(d`?1!Zak^?#Sm`d$psmze&jul6F>qvj8_e^BP} znN0LL%&6}J@w`+#icqGE2{-Fi&{jqs)pV92`g|7O&X^i)Zf9IfeNH?HE{}Yj6{I^s zFY?j&4Qj$AW;xbYM$;tT=){{0bK9o|Oi|mU?1Ea`BI7v@;$rY> zJN50Le*QHgetk(YPmP2>CT-IH##x|jEFvHGT@P;1|E=Chds+W?S|=y%O8=KZIlXCp zLnn3&E$SPMrR}GHGwo_pHg;StkW0tqLG!j~rxr}z^T6a{iv%=_|I(eQrT!b$9pJ&P5AG0x;gx}J16}L|7B1f z-OG?kQGI3|m1~!<#jgA+zT-KPA~g1IKH8mi5wBeX%Kn<}E~hou+fMt~Y1Mx($of8L 
zSN!+O!v9Hy|1#(h`0wwH|9*x4l4*PV<5Z7r>mc~=Rrddh=yCBM&yn@`$EmCwfagTp z<9|HG67k>fRQNB09vA=d99fV5@f0ikKPTEA|I@Ba)&PwO5gs!1wo?Wh@!!ku|8v(BtC2XpXE`2Gj0?&OoQ-99PQVz_cI!Llgm(lHP80Ca@X*`SpLV-S1ZTFM|$; z|HT}sG{Ezv?eQKu6x{+$Q$|F**afoV_t@8ke%#{Y5N{&zdo^M9q# z0r9`315jyzr=-2`zvBkLUiQCw{-+c=82-210C*7n)%oASX;1tg-2Qi475>Yh1LFTN z?0=;J4osWyukQ=)e^W514fyZ0TlwdITkU@3|Cd5}{EH!R9TKSt7xaJ3%m2>wtt`?7 zSKP-nuK*D*psXe$ALAEkNl5=9coe2@<~EiUO&rB0iQKcuv%@5Z9ntz z{&IbV&vg=p(lA)uHz6~0S|L8;%xSxKYf&6is7TzLjp#TaVOB0_>Z>1Kt-K8WDAVPZ z;_N$|Eq}0iu%7=@>#&OUr;YYMnF@!q|E-hb{QAGu=~e6hQYhBZW|S{Um=83&s<7h- zMU7&0x=R?3gzq8*WoDJ}G3E&&iZo189HCc$nuk~ey?PkHZw;fQ{=*&@+B3+wMVTdt z2}_{!{pX)IX6#$moh)y?5XtXvu9QtHSjpWl&W%$!fI3cPlB`woSof+vUbN1{`|IkQ zOV3xYFx2_t;ThNfTbX=oR#BVq?_uckd4TL2!hvtVfA^%f;{W%m_y5YFEZ(*MvGCj8 zr6a$_ZOcT^*#-6hekMrIRv~&GJK#^!z?e^_cbfbqmRmCgNSbM`QQT4&PhFQsOnq1p zS|iRtY=)%$oGC&Xii|T>C~!+Qa4x)BlKI*&d0P5+cWJ06Q`pIb+;tXVMB)B^@L*O& z#&CcCYPlsJBj5kXed^8!aO%&f+PSH`iyUA8sx#PyceBsVa_K_YLyPh8tcM;5JRU`E>f1!sa|si52}S z_n$ue^yA-t`uPxji~}2Q>fcfew(VNNdrh~_F0OA*-=AN7y1uzQ|8#Y6c75~lr>jq^ zoL_C4C$ApjeSQA-b3HZ>wm4>+1j*gd-185oKfXVIw++W6+*&3)risW~hu`39#1$4+ z_5N?ypU$stKAc__4|j1W%Sqtq6UcvtA*sd$nocUoGFhF|evY&W|7O!WbrhDJ)@^mb2K=|XEAd}??F#?pP#*t7J>Cn<|2H2NAl*!g zh9otB-1nmZ>J#(>M^2g(&ZqSOgj}lR`%y>sR}_u~@o+VJ!ydLYEST#9FgIo7x`-Zn zM=-<@7uYbj&0!7YEY%y!tGaJrj0x0JLnSMk58Cb9xK1-du@tY)>U(+KvC|fR#bN!@ zESJxE(e?G~velxwELo?}m*V<&>60m zM&T2mBKv1O{`SOue3W54GBGx%4GmU5+GsU&u_1A%d^CDb1F8nluJrs4v`PP)1HNzk z4%8<7e{y`1|NcX#UA_NX3T=CsJ48h`o#;G0KAOG>VRlWaR3hn;|D<~Uqa1on{1-i)l)eOZSY=Qcc=oAf`#sRU_|Luo_O0)K zb$hLq_rFf6{hw0k(D+|^+_5YTkg4>k<@?-e7yJto-T!ze+y7!4{C7_B_P=vd`TwQR zW8y#iKy@-l;^(dpiTt9VvrjG7?}qll|1P%w#h(6utMdO#p+n(+<;{z-G(cthccR_# zziaf@ZSbG>|GT~F`~T%o3HHBV8v3iX|IV}z{&$Q1x{v+WKPvo}L8aLLGBrSD|97Gt z@m~}H_M78Bcf0+S=YOmC@8!@VQ0vrjGI=S_R!f2Rik_OkyK{!5_);lK0;0E#2eKDBh8KkbeG1Ka;z zb^cch9UT8pVgIXLfURi<{Nq&a{OG@-P5AGeiBva&v4Yn=GI;HCK8-@CKlJK+9qO6)(d$sx|K7ZT z7BuefYcEacxj&6~#5@@M89eH$OoBv}B-ZoZMl<7Q)NN##JxZousM()I?Bwjv;L3b4 
zV70s;Y>eQXk&q0jPZdS!oPNBB^`Fe-q$8LR`B$7L^}Sa7S_#U=;Jk_R{ada{OIfrN zmEniXTl>ql>Q6UE@F)D0N+tEU#XOf<^cugWSxncKS|adkLNy@ zu(1OljnafzpMaVmxLgR7AvXaWbP;eTM5s92Q<7DusR@-f(J|9*WA`#Yo*HV;l8W{J z)%oeW%X24qoag8_`G37m?)<0S>z`EXzf$NWT-g}_XBXFyYEC%Q7^~GjX>?}XH?W~< zC5VVI1J;Y{#rh1AK*eCbqyn4)$;5t5wU=;Vb%BliR{QehOKpOFkZO~{=?qR28a7^>b=Hi72o09=OigGu!}F;} zr93LeKMBKzK;i}X7&>4B{>@?UGX9TGjw}3^LTm6ZHS`lxn}U7`-Uj}BWDR%>>`0q< z_^esIK0~H&jktgdeZ^40bzfImf#MKJGz}XeQIWtS5I91iQ+Oocv&Y>`y)gzHmj_LqlH6$C zcAQ_E$jnFZj7HOUj!pd;>BPE6+6uYv^O=@{)@z(0Q*uzNHQ;kr*DUHDQ#FZ(rs|~D zY{oaxfE67)Al)0AUUsqJS1E(WB>Q?`cD5G+6WT!~B2Z{36m96Oiwd%hC z*M@BaPE7?sb$RVBzW$PNy}!J%96TIRh7x8IWP0S4pyP2o(Np`BoxMXM(MxXH(jFvR zm2jFgPY=AYB_0sdS)Dy;kLe96+uZacHoCZ?MG5daRY4h=`HFdqT!OOIDwHoGK?778G%ej0<9oFFFJS08CBe@ zdvy~NQS6}MgOo#q#HC|hza%RvAr$Fem{tJHGo%0c3SHFwk>=JFj}-1=Vb<2aG@+U= zO=3V88Ec=-q`fiA>m;9*lNlr)saWL6u$+1x)qIHW&zA)&QNb6Rd0;jUzF38TV1ot2 t&!?B~HDdjxDc zVQyr3R8em|NM&qo0PI>pliIivo-@Bfm3PK53^1>_O-SCW>~1Dg%xw1D*gc>fSyD-D zHq_Ss_bu55jIr?;JS3^m7fNdBYg_udTWV}T1`9OZfZOg2Ww40AJa+>VzMU+{Ao=xuzxeA9`~9olz#j~TqyA`gbLIC&J->g&{8J6TL}E)|uKbg26<_WL z5s*?55LeQUJB$TDAk^FofJmUn)!P6K@h6Vqm}Oik9k$?Mh;q!{kcsh4=@1DKS`7h? 
znPZ#MXn>UI+La}YN~ahU2o}3;>c;*)S398j7c`bDIcCdVCqx?dY zGJS8^lQQL4UZ>6wtu&4qXf1eda3_xQvmPuw6vf)EH?5J{aV#~SMe+-}{vV>JeDAcSEu8^pcglF&?)h@&(Tb=fZu zpYQI07GvgF0*b9Cz|u694*|$~qn^0HJBYBu*xKeKwV4}`C{wSsWVoDHK-)Nqz^pt{ z1R{s{xM?>4(O_*3<9^+20PzGo{}}&UHyc3MXRdP)59?+Fh`+$d9K;{%W&;SR=r#?P zGm+n_J4LLZcmZn0fJYvoC5ZCx_gk6jAf(2?v%vzxSYYTlGg0!1&%MAX`H=PJ$yMww zfX|JJb&}h1R8J=-jAktJp)E<1UM*q+V%K3hjM+fxEu~#YJTsZ&Wq9eLjM?A+bQoKL z;9-)JQl-);Z|^^TxpvPSLE6cfHZJiJUt=UVnLnxj~uMCt-bMqdvIAK&m zx5HF)ar@DLv5kbXEmLOS%hP%UlU+)5Ip_VqVf|-YDr-;(s;U3< zN5f&E{&U?Q_Ad3GbI4ihKO5GZUI=n`_6+&ns3i|G_uZ-g>}YCB01=nJ#xpb$32n9* zIm}O0>Uv=KdJwouT?-r}yYOzks_CWq6X-%UtajK*hLTeL3$1O(o?bKASg0zVY}xjG zR#6J{ZRx&tVD(?$Li)E(D`v`^rX}POM{Wc8hdvlI%>Oz)nm79!s3!mS1~;QZ{_o!m zFaQ6XMb47{GuE_|{WmP#RpRHtX0{9j2W`DXog-)FWdE>}xM`)95n(b7w+gEbY0 z1NDX@Dmy=PKt8iHjQ<9;YW!7DUHl*QuZ!`2)E`{p|2gC=@jqovJK?|ee}K=QPo~Lo zRHc(_v3{m(@;dd|bRpe#|MaK)YN?L@(+{`rKYj3`@MIfx-+%T7#qU3dgX_!ppJ$Q( z9QfGrmQ7O~3l?CA(E^C^z0WCQiy@n>sy?aw)^B)`i(KR)7b%l}0{{U3|J+Ee_y95h E02}Gmng9R* literal 0 HcmV?d00001 diff --git a/assets/rancher-gatekeeper/rancher-gatekeeper-3.3.001.tgz b/assets/rancher-gatekeeper/rancher-gatekeeper-3.3.001.tgz new file mode 100755 index 0000000000000000000000000000000000000000..d38b61135fc081e2e073340d499115cd7b2c3ff4 GIT binary patch literal 7398 zcmVDc zVQyr3R8em|NM&qo0PKDHZyPtWXn(!?SIm*WB5{wklKf0{Pd>oMc6);6Vc2Ou90Wl` z?~WvX-X*srSGF4a{_Pih>{BaCb`-}!MHG#-B!|NpayT3gIfAfYjG{dv3NH~86rH0n z5z3#EIQn5VsoU*#_jh;Azuj)P{%>z@yZb|Lx4XN$x4*r=fAB-Mw|lVL`vJNuYX8oZ zN=3vE-7jvd-ns825yrS8igG6V9zZ}8iTKD5h*A;yly^v~c$+e*NEG#9cDS?y5x$Ix z3dW7q0~Cr5DTaC7#$q9rNkpb?Mq=#4yVFDXHE*G@f-Wc%5?~+f$1+NCDiy|!^|n41Vbp&`olXd?O$a5khPzX0sZv2QLqi;Ru;} zCLv9v5=i0_d2Q?-Nvi^GjiK*DWAQD4O2_<+0LoOhQcp3sDicS!aO{m~3My8}U z=DNFMr4rfibVgK-(+fY~afc_!+72Mmm~=|rqk+act7VXpzN48LfKsB8orsUP?38&i z8F`mDz2YL2eXk9=5r;LHM!Ysi3{pYW)SJr*$x{)a0dP;m{Sj5(B;$l(M7QUC>-l`& z)FA&SBuY`PZ~<5#|F`!Kc5Cu~Z@ag>mj92D1SSy;i0s3TN79h0S4^Rpkf;wk-L4mK zCY2zRsXr)>>^kw9##HrTyK5`_B6xg6f-&}CNFs?|NF}+5@DCEvkSNMV|H6wg=a*Uj 
zq@MAf#>i83jsa((!33FxaEuWue3zhb3zP$3nmrn)?DCxcg?;FH5g)zA2}XT*_4b!{ z9>!E1hB1|TZk|lEaLVeMEM33)nZ{&uIsoVg3zV)0yk5IgERW)LQa^1vH zM5KIcB&JS^=c6wGDQz7A2mcAq7WD!r-HK4c`(K> zjVK$v8Zn-KKDovqHH(+42GBMT9kll-;v!3@h+JTFju8fmi)t2tQASR#6M=^RvYxM@ zU*fd9#`5&KExQC zsj%1EenZVSfq$kbXRFj}Z13&Ap?0c1d~E5V`h9hH6!8lZIaOLkXlvQ79@|NIh9fGK zuF`6GU~msq)OU@|iXT_N|A$p4b;)-V6oQ5bH=sny0@&X%f719o9S-R=wAuwQWTE-n zGbkPjBnpA#Z4A>Wn!=wciOlvANRk-jeeYjrp_}qbx1l>C;Q|AaN(9Mcgx_+BVY@t} z7=;puKr10I1jRv(sf6bjQ$vp9x97SnWh0OY2AjV3Y6xztXMwm?Te>Zk`9>97(I|oo zgj8>(hy-Ggci7%VU0tk7VLHqSnq}rh&C1LzuoTrJSwQ-S=|TVH@{BFvnG?f8@y{%! zY~>u+%GY}%mk&rqB9cV!5cc&jIk+W~668veS$VoMo4mjp>$1!IXO?fApwups8lBA}8+U@n{&p=ra zrD2nd@Zt8>8)Fn}UCVTOZUsjYQ)9L*V~b*eZLiAl(j>98I?Qb)&9L(Ay9PB=Jd~y@)lW@90qTsx9-Qvf|40<^D-4ZN`UUo&MfRVagYByRx82?C?ymK}$4F)4a?bdw z$-6V@U7#X8?~<~x4@bF4=)EiN#i&R~6w!MayA)i$kmbSSX@$vbibUvkMDBx5#+F~~ zo^3O~y^x0%!kK}P@3f8WsBA0Pfb(FCF)`L@z4i_gB1nu1g&bJL&MR%?+2QG{KXz=D z@|HajdY7+geIsu=P~Y3DX@Y$qLXc@KhboL4OaZonRD!aRF&2ptW_!xE``HJQ8Lj-w z7d0+;88#!{hnpOJLV`;&!Wn|+1tyu@?!)I+EJv+A#By}|5k8kt%&kpY*P|>+l`n8Y z@yh2H|HeT1np=KeN`CnCu~b_UMWxZf`!C_ci1X0uCLA(g1wVX)-G@&f;s1gu`4aT} zQ}T+c@$g+9W^s@{%JAof3tu6RaV5JA@7R+GT+{upxYPVm59h6aZv}VbJS@NwV@6$+k zuK%SYRV|rVJKVlro2`OK7dd&;HDS9Kh_ z8R|o8in3+$$0(>GBeWnJHg(UO#nybUm2%tBXSqUDpGvt#WN2|J=!8Z%!V?(~yFO$m zQvgHxtKezk48(+uFxMMma@~jhy}g~i%ot5sA9}WA8et#aq*9%w>`;2vl1&AM$ATwE zBq0|xqM5$ahQrsdbs1gucZu#)r3U0JXJ?!%PxJXTW$AUflqqq@v_d4)B#AIarbuLv zmtT(kghy$dA#1%b-jKxnN(9&IB8KLdJ@GPshWv_M5fL7qzFN!s#&kdVFYVwbLBLbC zw2#;V`QP2^_UiJ#*IWC4JxZ!M7g?F_WZa=;b4B3)ZRE4tmRc9HnY!k?0r0IE_=Rcd z`JaYVwL?rIKGkylKIWhW^M8L<|J&JF+kYM>mFGW664}Xxz;Vv&ZzKSmk4ig<2*Wm=#Mi zV+nR-y>pZ+jbKBzvOknL%TWA(NgU-zB~_;^cb*0}z$gomDR=?5w+>cRVaWYi@(`vh z%a$b>vkYtQ!JH-7Q?&q-RPk8r8dc5XJ-)g+>xn?j<*=1YOzh-OZu9Eed7*pk{qj85USFL$U*6ZdY>1A~wRr91GDwTCAD=t-t zLX-w`91|9nxtRGh=AV#F%K28@p1wQ&^!D)0$@%Hw(MiefXgu;X#j2Kga~N=j!@7OG zrUh3sO{G(!#{FE3__<7^QRGTsoI{ zN_s)zAw3U7R-&szIuGq1c9J{CSHu8(e+s zD^hdyl9Np~{L0$E+j$#xLoF0BWrh;J3KHNc3TiCZ_DaEi_j}){f2FkS{=d*H+fLvv 
zZwy$l|3BEvyWC*K0PbJ z9A+)auC=A$D_RQXvSw-Kd~<6-?Uvcx>u2yKSI`z-LR)YR?P*;^Yrckd--~GVt7x!# z+l}7JynL~{yR*<}P*^ZBueo_vjOta;FEab!HO7f*=OX-N)3XCb@xl^PC;!^yu|#`o zlgDaI9_8tj0dWJ;oavWfEp7Q&+fl4}w|b9yZBuEqsbuauZ}ieo8LDRKuv6bW79(dy zf&SWnex>DwHk(;SeUbfU7TlNEa%Q1@1G~;F5)ZcVED7rC+Iz~wuB34^j4A6dR-QC& zNqNIV?8EMMSu)weGq_cyY1_d6)_eP9(z5oS%;+O{gxQsS{r56{AdBvQAMEYa{l5-& z*7l!ANo6%qT#&#gsTy-Z|1wj{TtlI}FbW-6&Y$t9dg~_;Xezv?F-k?^qz^1LtIprR zVXQniJdsBBIVqCR!u7T7+?y+pQDh${C@$QGk#k&)sI-4vnU~dOppk^)wL)h0m5xiZ z;Db~u9%moTl~Zcglxm}fd1xJG&67LLs3h)@w>G3nm{pjAU(?fR4fvT-PYjz*erDak zmzh?Z+Hwc}vlqA)asMlHJx-lr>M0;&FBp{CeoJzX?uK=~hGG5k3B?+dQrW-{%gv|I z`#IMu8wa8rLV1-Zja+9)8Hwm$C~9~%Ah&n|eGw*KHg|JFk~1Zg*T99Nws_VoR|c&_ zNpovntpi03SD{jVA&5nRwjz?pr)5SaZ)y>ELvS;YMy>D3rFK6y}$cK zwsm??`V#rSQ1H97Q2y`M{r~p2yKDXbQPKnS|Fz)H1^?1I|HBrD^~$hLUpd{wfYrX+ zUNwISSRntm59T$S$dsj|rAk2lJ7r;neK;DueCd^3Dl+GWnHI}_3pDrbTW(!& zt{~QTXVUBF0ilu2`xE!$_tWF-N6p)&Il@Wi z3e$$;lh-Gun#e+zRSRd3;UZ~>C&Qsp1mG=ay637ESEb~kvmgeG#^b3DFe3(LN7*lg zM)?>E#u!|-?GR|&N4RGWlkVVrt91{?FW~zOzw)ljufn#HQ`Iu^f7$(CgMU-o+LRX` z1T9$qyZhU9``=#gU>*PIQPQ0EpUewt^B($ZyaqE9XK3~&%&rx0e}i?r$s6!#?zSE> z|7n2PzbkqFb9;NQ?*F&9ySD#7O1kU)x4)Z(XI53Y-%ZW@8b;e)B*?B^*yp_~cQLA0 zHrE$vE4=yElj3ib?v(%8gRyq*KR5!!0{OqQdr;&MmnPysIhX?D3QBPRT=h}`7os48k>}igS-TGG z9ZfHa3b6&P5EY@3Z1v%D%g~dJKDkI-eQ+OcYtbWI3~v+q;j<}h?tRThk;re_T8JOa zKOb)^SM4kxDxmDY{f0mi4$h~K{v}S!a2%$(QVz16BU&nG4y>NL;+ybK=vFUfeX7Tk znDP|jROXS(vzh1OL^wzv8$wc)d0OwuR@T;oLwK0`&-21})Cqry({ex!yiUwx=6BP$ zp@JqmynLzWekpeYLT+Y(F{1(N{ek1b1#b8fe0U<1uokE@9x(1zurN29sl)F(uO;i7%6xjA%o`;=a*>$ zt-G8-TF@%_uC<`0|J&Szu89V?VNNXmG2|&TGAyKlDrHDiS!S#0VpeC;GciMY9eeW7 zvX|fIKxn$&lPRp7LRCHG;khY!H?##BaA#KcT?uv>usg6cku$(AhwbpS66?+()+rg}0^c?@p zigX9D@! 
zc*d+P-Me>_UsfK#Fr3MN5u zuv~|$F*3MFX=LV=+n7n;+kp3a9?3K@rcRJ!j3O9C{K6`il#RAPpw^BjXwGd*-w6x7 z4PZDjiwQhW1P`*XY3^)@y(;80ig z+HhigV?xtxv8a!bStlN#38l{1ad)=OSv29SQ zG2{aN2csBvAmDLA6gBGB)S0*5@Ck+xh)`OHB9sFyh0Iw_R_F|4Q%q-PTc-gSzbz9o zKG!lWbu|#%16R>2ftW$IczVwE9f*W8JBeXTSQzOIa_(Yc`eW(m5V3w_IsY5x|Jljm z@tc!XP5y=Rf4kS+>DK4}_Q87oKT6st&t)_4O&EMLYfp3ghv(fCzhHj4fn$_`pov-H zzvHAkVobZMfaTplzr8TQQl-9ww91Yc;#GsT%wyR5F~U&qB#{MEt5jZ%X)w0cfl3hR zcQ7VzV6fBe4ps!ryMg%;Xv&Rh>c!-m#%T;2z=w`%MQ9rngIVY$k~1JBsH?$rMd=0x z+p8pAE%MHsHs6y=G}?;bQVU2vEZjb|o?vO2t5RBn3g*N6X6Xh7#x}DG`sKtcf)CAU z(-Z)yOK4>!qEhKyr@LHUBBai>lzeUsgfK*+Qh}Q*K=;ej4GdN_@C@XD&M{Lg(wJRgb(ye$tT3Jm+kk`wcV@kus2_dQUFt6=RhnZ_d?(TAOy0)WH@2x4 z3{w(xN%x%LDo}Xbq=`3t%hjnsiA;SF5ewp#-q&wz^OYgo-KD;RbOQsPAhWzDBV?*{ zP2!%UEECV3Zq9477aI5RkjqM%6MB@YW8&M&TwBhA zIJm+21k(*zocqv&qtoBFV6V6RhHhm&k|s<+Y;v~u_TSJuVR<)|NCWfR)Wp*z^LrlI zT-B6IcnC6&L-bX8y`ppjgKA75F8dHsmR{d~I2wyU79`=$Hgn?T#lKhLeZF1cv@OSk znBS(^)Rszqm!w<0%v&ZT#SNUgZxHB@W-nn+(DK?@?48wM_Lb5N4BTO%v!b;FK@|;1 z^uTF5CuVE4&3VMLd$1A-pLSX`@$yiOceZI{UK(Fm>v8E!F0ZaQ*odq7>iYz(d^a%Y ztRw3%|M?J7LrZ zkupVN?6?al14F#oEBQ@rE=@f7q!s2>ueLUJ#+g%qkBitDzI`>>w)w9ysxjK>m~Cm~ zn5R*gJE|U?9ZMh1a708Hp_F=l8}Ar*LBSllPj>#(`p}f!Ta6Da)up*a;|yyqGud4` zkjMlu^Wk@i&~D31nzW5$iaGimYp5A8I)o`t;fgS&`KpUr#~Sz6k|v%K=}JsdaASN2 zjK- zouGu2g(zSU#9_yHFLsP4>u@^o;XQ|#Tq4BANhm1Bx@Lu!dE{(CVtI&q$r@amF_5{x zJ9irv!axgU5@3ub$_F2~merUjFZTwrzCL4Q=HPi?n|Ga%10VjEry}>WGR&7_eg$a) zl<9ijAS?YzDl(#MRB|;kz%#F_MDUm^%qOkjT*1JQxZ<9q^B7($bLs)4y39EDY6(HA z{O{3~ah)EJ8vMUVlC-sGiE%|fV2kd5@9g#J{@*)$>-#^Ck~VVZ!Kz=UlhwJHz914q z;KQL^zDoX}AF35UJ|8QU$bP3YqH3I8_yLbQJVDmBCewD^X4+8#u@e)mYIm&6D&=mv zTN|GD3l|XEYaK%_V!Lp023lsiO|Hhvo$PFZiTkW`w3z)6@<0}-xlwO$(>d9P&Scxa z?jY;%gt#f8G2HYuIk4(TDQ$B+iujdrSm1HO8Jh5xUc2rldsIBS;WnT!Q%0|b*`Wfm z5oK=MZmzZ(Pkftac`@P>6pS!;()C-e7-p^$(yWx_jaQC2(Jg%_o!u6PjHTjke^HRF zs(f0)X(}TNutS0XrEHT_jhXrXU!5W_a{*O$Hi~xh0FqahSqA!Ky&`M?TDtPrfen@;>1i2M{z2%8{nG0MCV_)@T$7W z1{%o$Jf}YTaHSE|2RV|IGp7gaF0>0Jz+#bpQYW literal 0 HcmV?d00001 diff --git 
a/assets/rancher-gatekeeper/rancher-gatekeeper-crd-3.3.001.tgz b/assets/rancher-gatekeeper/rancher-gatekeeper-crd-3.3.001.tgz new file mode 100755 index 0000000000000000000000000000000000000000..c58830424df29ed6edba1bd8dc7eeca539b4f373 GIT binary patch literal 3683 zcmV-p4xI5HiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PI}rZ{s+U@6Y}#3hp`hZV}6No(lu_A(QkDa@UU_=^kJ)SoG3T z+2V#GwIr3ar#Jumf}|u%vL1FikM3F5mpBw3#ZpzVepR$3;eLp6xF8yD5Mz`>Uk0PI zkVx$D23O14Ie|QfGQyH*VA`zhjSq# zqBRCErwmPpbMzUJ2)9I1GGnMf3z#9qL@5k_axGvf5(yfkm=TT2>-82)%m_>Uk7j5- z0XUbCvUi0Gsbt+j6R3RVwB3$@k3ofr~Qu)%l5xNo{T3u`+p944Oi)@-uwD>2m#J1N9a4-`y4pq z`z%xI>%;K=-gcyO@P2iNgtOl4z^=90d{`_g*K_Fqmm2jSGy(Ep?ej~^@JFUj`~QcS zeQO-hVgE;yaW(!wIUFDD?Eg8a7&p&|_QTQbq<2GkFopjUvtERn1Vj^a*1F*M>sRm` zI($zeg!}uUFOg_WJEQ@=>2>AecH7T3!~J~^fRPz8<&Jm~$2Bwd04lHqpdTWOJQa>Y zA#VCph=tNa>H_;c_g@k(BndG6PYT9n#J^b|Jq(bc#$hZN^_TrA*hZE3K4O4DPsQk) zMKc?43=}0{s8bq-ihFI{FF>}c{fzRK`grR5BH{Xd!%7k1WMSc$8{;RE>gAc>8o%h( znlj<1>b>AsLg*!;|_%PkC+nsR{Dt`vx!ZE{QCbO0k z#Dn{LvzgTS+p*p%j)VLA>DpsW7MYw1D_$myUD}s8g^T%np)VyW5GAf27;LvJwG8Lwk*NKVFZi4;M!pQpE1+{!E>lwIc`H~F;XCv8RB`R1AXlSH z9ZXD1f)AW6D=*7YWahjiI-C}n>1890Gt-$&KJdQ~$`YB);ke2fYGuXE`;Np#_M3D* zA3Yondyn8zoPj>KN0GQ%_*q>>VbvehWl&D_S&Q3{7>QdP&M7C1{*H3zJib5Y(dPD3 z_5Xvq{gVCth_CL&{r*cwu{(Mwb>hF0sy}Rv&ic>ExSIdjKRVvwzvrMz-NeOdXPFb& zC|WD{kWJ#XY`Qf+I)#5(pY!$W5PtF#DUs`IU&IL0{{_!xdRC8RpiJ|Bdw-z}mG=u1 zGk|_^&e5O#WT)-_Pid$me1Y&&Omw&hKYccnq|+69-BBG@m{>iAY*Ey1BKQ|@$^+yY z4zmNxgu>ZuX`QN+i<9 zw3nYKEuyT^CbX%AeLa9%lruGj-v%y^G1!B_0=4-Q8>DJJxL|Pbx6<D`T3y(d3 zbq(s~NF}p>ps%eqxbfE|pu5qgHaV55!p=onS#LXWK34vsQ74>Mj&3J2)Ffm1QUuqj zN;*jJ(p+bWidfQH%5Z_f6y}5}qXC35ad(+Lc%yv6h`Y9t^$3O1?@`|gc~fq@xI8O2 zTwa{78djMK%c5~AQy9EUl)fsYHwHZbg>NZOzLfgeKBs(XpC@t5FhZ^gTlu5)DF?3A z1Q-Rd6;VEgNsgu>@?{FgM@I)oJ%G7#cJoNnWeC!2%!ViyjITse0)j&rcMvtp6uv}sUiNmdUJaI?u{1(kFiD<{+k>eR`36e4|nzdXQ9_kx7|*&ivi#ppAI@C z8tB?hxb|FxN_LzTjF4#*CgJ_imCI&JxN3ly-$Y<{oPipA`hZD3UC 
z|1UJYrD-ZXW{D&$U*DDMLLv2gQV75bNqj~5B4I>MSB=USF-{>5y8{!!5k>N)TV!s%^zC8*%B?KsphK!aV(jWRb~~n-a1a*mc@}k{R7PeA3k;XHC^bm*#e&n{ z^V#h6QnQRrR+$4zqvV9S+pK#)cmNSuf4#LKuM>$Q64Rk&mRt4rqTqF&Ef^F;=kAg# zEe%A^8m)RY%Gcdk+Q{C6Gi&WILrCI)Xbe2K;BZDFWM>97KDBs_B-PM} zu3Ny>g8ZtZlThAVtgJWK({+q~NznsTR2s$9M59sXvto&^$+8@V(|$Ou6#QzIOr$dv zW4k#<2mIo*#F$uR0;(}m)mfXFaAYS7Fvy)wZLh}3TV%i9fk;V~HP2%yd{pZBm#$TX z_F)Eb8x2@vsN?nJ<3LN|-{e;^3NmPXOGd%e8%|~ngE~1lUS~ouBvqwo+@e}?f6G0X zA;#(4{5kV5YMWgwxo@l8H9IJOduI+@@F7OAUhaY5Yzb&Q1u78Sc`b7c!yRSJxFLz| z3p&}i4L)3+)-2JYMeP=1Xt7G#)KRAruDklJC%9cdj<=knMcbj(P#OnTgOe3?Xwqun z77Anxr~Va{Rx=@)@y3Ni%MJD5XQ4yrCkkcX1`2(9iu55$ls(YQDPX_)_SpIpBvFg{ zl-NIIO4$DPjLFZI50|H|Mrm5p?OdZaoBR7IV}Cav>?piNxzr)$3-9UlwZyS8YRA$_ z<~HU-m@I0y zP`KZvw2<}=oo#su{J&xTM`=kY*Rcqk)VE3ob$|cksGR>f7#~b_`QK-uKYsqFs=BB= z_n|!-RNJRM8A{(9(8%5bw1YH{(hBO+mUP*Fq%*cX)7%QWE!O+bKK1mj#hP#k51QG;h zLnTo)WGdS6k1qnOkNZ19bSC^-puW-I2$f2Ie__k9#_*}EOIr=?UTT`t#U3MBM;kPu zhS|*YenF11R#NV#FQ!ICj}L37HOjlFpeXY)i;+=*xq2mQ5oi52w}LJ%v}-~x+0*Y5 zxa{arXgmCuT?^mtEMkLjQW zY?jZt!d>ZgW_wF?R=SCv2Z8YQesfH=15Z2f^dAqND%JN!TqD;4y4H$gW!g=~0F?a2 z_Jd)An!-j^fa>j`dTK3>Z?RfejUG0sOElL5H=Lp~#;UUCT%q!+e z^ok_-MSt|Ib1HG4)F-$hGo(qv5FQGX`b$ZFQS+- z`w_Ljev@@qC4I^&Y1tiIM6r;Xa6N^qghM|fH`q7cP~yTtCA3_omHtQsbiQ;x!q_=w zIH=|14^@R-{S;`|m5Wao=gzsBi5z+j1q)OLb1%-*^I{Ju_gP|Z4t)A_ajraopV23Y zLbyW+1RrSl@c!*z!L-;OdrszNdJcq9cWSw9*wOA5f?e4svGH`u3XE%efQ86nDc zVQyr3R8em|NM&qo0POvHd)qelAdb)9dKDNsJF&YaCHa<&=9Br2YdfibO&qW7v^!6> zYeOU?p(Y6q0LpRV>}&r%xDvcdvRpUq$DC(Ui^S#N;Nalga6n-aOcC`a6pmp6dk0fU zxql1e=r0fU^!xq(-p-EvZ@=F!|97yvwf&dDPJd@-cW-NN@5NvGgPpCd7k>f$hXbMc z$ry+9Fa4GKDi7`t^1vh^9CAz&HtYZpKn^1^@dLVT@5X`nZFiq{x7ygdF+X)d1C{DS2bumRCLNLV11aLwkFeViICqwjB zyn#swW|(C#!k=Xc93#LG#fZsoG2#>l%{~4thAA^dd4Lmz=_crI4b8U+(n#pZqkLbgr`z zvV;7;1OKJp_xha};)Fw-Aj*av51<(0Xb3*TWXdp_{%azBi>^BWC`_79d0`r5!8Jzz z3NQ;up)BRWP2AdWce#NXAPNAR6-Rju5Kmd!o4w(H_VcI?_#v z+C#bEJu>zfiIKM+sMa?qiA4V?OOFho8l;q^`_A46zI;`1o~J0l<6DrD5ICEo`gn%M zIqw!+mL`ZJCN`mv1gsa3BtR)=y_7^axb3AR^cV^tzkg)}Dg0h?qf2uPeh 
zWQ(^f;E`-l;#>FNn}R-!(kb+d&BG`nHz+)%WQ-$pB4%$0o`)p3Mzj|yYTBe(BDPKQ z`}yL{>l#k;Lr`vBHD^$qw>Lt3F=%VSTd2Ky@G9-;%}LRX#PWPZ!WoE5fun+(wz4$yuOkSyUfOI&WQ`o9;5@nL5T zfP!=>8U*N?`7nl`Ndj+}++oEM^&m}#K>P!5*bu{bI2>$k@9gfqc=`8!zdsaZdKku@ zEG(D$6j9XaP$V~EHUxuCN333iz?GPmfJXp*N+XC9v6Ms1XeLFxQ5H@R_gRX9D=FN? zgd$Ou&RI4bvTN3(S>idnzgSFTJjp2X(s0!4q$E7FU41XRlB{J7fEXwHGl(O>h6K3c z75wxef$q!3F@z|M$Zag7Ljk(aIE5TdZkHQ_UHUhKUPlCm@)f5fiV$VJyp!IV>7bLp zJlC(pG%Gpij6`B1_>hK>%h?cuGh*zNl98ka0m~c2)vl64J{^J{OmT0DV8o~DEunk} zw)_2lrxW2BTA@AwIKdpl=nzHl_8bKy3E2?ri7#Ril8oDz+v4RI;wYo&VoDL4k|-R4 zL0`^+-mRqEm_UvI3#KT{B1AU<@+Us%GEV4q*I%4+U>M^BixuhV-N_-IIEy$=BjnkB zSg*6T)>+FtRt%-9`AA-{+v(sKP9(Z0N(mDx)2)?rdoiYzP!t*w&kDW(a5yna5CCin zcziwQ)I0&&JT7$FeDB$I6@J`w_)06s-IZ`Ab`L$0$7N$3fhmWobK48@ZWc`3y+$dO02 zIMd5Fw={=#s5zep;#)2F{D^MVM==AP4o8f?rZ7fg4HZNn%?Z_PfNo_1Mvyc4PuJkS zI~(`|zuz^rPMa_ffZ2HtgpT-0uLznY`|N}yXM}LuzkdCJAu8xutgD*JwN;&pJ1@4( z3%OO8H)Ho_$71@t+QXNxosQawj<}*Ra7sepi1cEdBrMDLrl7)} zD&t}@CE+QdTrpe2DMh72yA2l9g0+H-V|qCHloEy{KhtdhEkKFM7$E?I05K-?8cJe7 zGR|2i9dhVMsM7>FdL)6o4s(nlU$Rz~}O~>rmFXgTb=>+jpcMBIw z$9=sscsT$U5^I^jm>4U7V&Q$#3+2Etp$@3b2GEN}|48lJY6v?9)OX6(I8?jN@mMl} zO#s1p;X61uI|P{7T}wz`5||kADasfiDWZ@Y?XJe66cCCC>*ZCvLY1mcZ3Q1~T~y4) zv6kj$QXsTwDIZ9-k!c#VWdWZG*r$+kF$g8vp%8GgQN|QbIHEHc4Z$E5%Q%=KW6~7! 
z7BR)ub!iq*n4$|MV~MF`a*4_oaWYYSQ!`Y#HpJX&ZkjXGYjhh#1YUO}_cUZ82u1vM ziNbkXBCpOGATvZM4iOWJTZzk4!uWoKAw$dv8fIbU^6hh3fpwh14!_6g5OfFrmcQ-q zbTxw)B_(o=*dPRlZhR~I6SFOZvt&XMV>QMSQROwE;77Zzz&~Gsfj@W&Zb=5%lw?t8 zWh?=+Zp+Jsb}++Gei~yGg{FXI?6MD`P}ECMqlip;Tm7wmufNmlZ}o62wk9Ny!8}ep zK1H7C6Q`8w!5%RKB#Cb2O22-| zz(2(d{aqKokkofd(HMUc^~Ah0C7XF_oq6aJjME7D5ec9&sDLB6y^H#CnWj`)zQmts zhUl$q1sIW5j@}0sV&!5c*7P`wz|9mTKuDls)hm{0Q3Xa&FiOLaePa+1l88&udc5Ch z+8x2*8YN*)%DMr~FInVK6k>`3UM`{5WX6z}1EF;VTE$yU*A-N@LzKTThW=XUGz)Ql z+v0}#D^`|UJUk(1Yf*n#oIsC9My|`JSvOoi*PWt_vIaneW6X^yBuFzUpU3KFjAAKa z4Ypps!{QZ1|H%+rthm^!Pe_Q2%qWm9Etpa&6zh2rMb+8O6A~hip$G-i%+Xovh&Si* z1))~ZxgZfz6nfu2tx2Ll?>(`ha}Oq=$88rW@Bu&YLwWViZTt$mwt?3A|*6wxdbo_ zl}{4Dj89R*wM|NkMxvbEc>GShK5<{H4`pz~)$TddghQxe(Fbu`k@Jb7?eP8}>C`3wAE=w7w2*zGUBU5g$ z^+J{$45dtJgb}k6i%9cs(5Pb=jGf3_c9m296N$1IF~Fygn|)47vpA79tGuEvbAzJ@ zBt)4?gc>QOG)GubqDpa2buW_8)^ads^0g;41&t*Iulm~zs=)cw-$DeCox80ep%8l^ zrk-k7+jX^?oU1^J$WufaW*jAf7_gw%Q}c#vtbl_E;+QR`ZP<3X21Dr`PItNPkq^%&7$JUxkP_j~@n`f_Y^hpE#!_ZfiWj1} zX3Me|kg@4gZNlOOj3Rg8;RGm7soKyoBr}~z4OQ&5V!<9v$xU(yIm{JCIp|FgLJk4B z5iL$}T3E;wF~kAnfN@B9ru}sn?z{*+Jl71RDo(Y{fM$u2^Z=wI%qgU|f{RoYM`gg0 ztrviGHyXj_?KB!w&m3bjE?h9Yi84Q;Tivy%>CbfKd~teT1BYmg6C@3pP^mg2 z1kfiGWMXH_mjoEyTA{@ftCVgL3dMdo)+oA~NjC^eW>=D%!9;Ktro9a71(|OIlg|(b z5y?Vnf{|)3PJTs!kw2J+6HbJz$2{3^Z$m>Zeh60DrCH*$X%|2yb_txQ&0E9zoPk5> zhaTfd?1*}tLn&c(*|e+v=;=njX&;leUayn0_(CIR9k^vt)e#xD~uS~6ihmT0RlogK0n<*IFgLA#-!*KaPFFZq4MNA zwn5Q^_@yTfbS(}u<{`+qHz>L8aoGFcbDkl~M%^Lks)(YY?p=32wD1pGVDD~fh_-zI zIfsEX|H?f@kWW4_wf5-KtlP(}%h$(mkA|Q(gH-ULp5}bLYDh+LmU^6Aqr?I4 z=*{u_lOeEFp$D<+gDeq@4=K4)EMvitrA}(}TD6rJXPVs4MBTk|UB{Z`G~p1ii9`W# zL}qf_F}xO{9+f^8Gha(%(3w3sO^_y?j&VhaZn{zqeN9Q+t>uq`RU;}S@DitlVOX!k zEx_Jwo4pB2#PUF)Q_O=Y?#%}N;6-l@C9@f%O2YAG1252_A!jHFwUC<&fkJEskZIb> zsSp6)_syS1@%yi9bo)>95&(;aVg3$)|58#qmA#3boq>PvzkB;nfx@3`4QoEq&Iu`u z8y@#?Z zsqwLn5yqIbAn6+Fn<9SA82tOhY}73BlE++|7KEVGf5G3c>C=3qXFi-Fl#WJg-TbB2> zjV+}_kPdR}S~g-!BzYhJTA5J0S<>$4cnk|13E8kLVWsBt`Gaq|^`-^Ms^~Je&wQN%iPr 
z-NCp~-m3Tv7`CGiDV_=Ks?fY2FzjEy6inHatDP5ZZ`%pEAKNJ*^xLJLd8O6$}6SP^=1G7Fw<(hp5X#4xjJBN_!^pg zXUM79)22L2UND78BIBmkI<^->8uo=t7}mf z)ofS}8_-`Y;B&ZvEmxw0=k{DgK3&J|RE|j$immwh^CFRGLz`L`n2A+(D9|oDEv&YD z5xR4%!b9CDPID2h0Y%0;G;cPQ&xBAZ)}Oq;I2wWr6^)fQCU*=uFH*i#ru*Dl2dcQQ z=!u3bgxFY_d8eloQH%dg35K;kCED0@jc)zIjOWI2vqFqSO=9HPV!3D|INA{0Ia0;5 zv;Chh|NrOjPfP%A#yE-V>WaA>IuQ5DMSanFF7{gH&!HQJF-Z^?w6kU3P^1=z`!3&? z3OkYdEWKcLAljWsAl_N&A&9d1_4t>T?;#t$b6;?kXKyAxwQv;s{{D zaAq*Vp$bqD^DVxb4X1o^X{|ch&KJdJF4bmTSlPD65>J$f?wBxi8Ij3E4D3}N(EI>JeK8789h42Q4&2iqjsE5TjZW*EGRZwZ}o*Zk7X7#yMingGgxL=g(Op;&d& z&p0?tw3rH)miw-xSf)=|1W^`>-5Dz{4vGOzjH?-m+8oc2R!y0btEEMh0mwmwAmeti z?Fua25PB_p31IS^+u%=5CP<3FDs}-<7$c4-t5NH2v%*T5h2{THQRu6ZX^E79)7aQAWmdmbMpqw16?wh-~IYV1}j?DlmKmROb- z5&KCkOnJDC$R#az$7`Gdv7F_)H9N69jgXCykPu`@0IRlScRjVFJ|y@*8G4N)q>lR7 z^&b|FE7pcTo*$SQbdKk)X|j`aT1TIb&IBm}F{IaesVgB})VB8eOIunut|S36Ua}V^ z(U*51y`V#oWKpCWR`hO|dP+jowmzpcB~hsRkt+|mBnsBEBtndVE3HAjmTjHD7)h?e z5Z~yy=AIspQ`e}Gi`cpW1C5xIO@Q^*W5R_1%SO6DmWY1^U{~tRl?s;0xC@^e6qO-8 z96^Q43akZm9FKEGvZ+{|T1N^Lh9C{5lHhX7keUNePJ#^{!mN1eUHVxBG&i_%fDb3y z{q6RL8K7B`sG~q?0~Y+nv~|p%Gi7B;2~+-Ru|D+KgZ;N})u~Sju8<9)DDO~VUVS{t zj~p(JhKP`B<;Tt=I<)+&$1M*yt9jE?X69!?VLFweyk;o>c0zn)1D0u)nCX?ZVrVY? 
znvz(oVRkP4xEV!Pa{H1%6&8o&(*<~#sp2~{nx8B#pu-wI3R|99DG}n#Dxx}KQ6Q{D z=cb8VwKOpjZV=n1S$JZ$C5zIAsGmwrxt z&g#h_QFhO|HXTE*X;p^tm4?-+7~*rufqPO|hT(8_qHu*0lZSVzKTSipL3*vK`h2BMCf@ zAVXFVZ;P2y1X)ep;P}PvOS86#MJU#K8JDFhQ}ItmQ)_GBy*?tZ7BHA6DmSLjtS;YcYN zCRUC*pqaoZmF(zD7f^cYQEnSjxqfQugTL8RzcnQ1<`F9>QETNEk2Qcsx0&>_#l`96 z5AV+}hM=p@TAIGkj!q7b&W=ysT<)K}IUfSg)7!E)NwZDQyTM6FZajuRqgR8im;FyK zc3)PizI%UoG&C7bkTPmEPk1rBtpiFgbUv-_+*S&SjcPJ68AoYz8$S@nfzBwQlLVCW zOEgzD$$EgyZ_oiX*?W{sA<8b zL6=4+M@6hsE3?EnNq}_5lQk2-Z&IA)4QS>e1R4qKH9~18cRp9R9{>sBH-uj6m8Owy zj)j90_p4}7+ED;bMkGr@z$qM$aUecOdDpeDI_JIB79gu0>l7re31VJvB;)t(?nw8R zGuOv74E^32b^lmTIsdZ}-&pVD{l(F_&p+{%`sqh8 z`P>CQ`1-ZJ&^U`CQBarhh>S84+r+Ex7hmWie(|X3*DkmNDyT+C7$_hi0{;Lgn37Is z%a>}@n_>vE)^Th?wi$+MM{tm#cR63a|N7Nz7P$730YFv3aBxj6wZ`{wGSGn-aZ@(H3;jX$-k&bE3)2 zBZ5?Wy*RIWxwaC)ksDgTYS^`uw%~l2%a}-NR8HYR*%vc8>i+B3R*RFvlm=ZJ7&oorS zXBLxostfbtCYNlV8|9Eb<{*Z*fWvDcGLF#=h;fo}#B2hALb}T1QzNvSp+v*HV6VACdcK|K+a*5~Q_Lz$7OgdCY&cKP{Aso?KZBj7;z}Y2+_7(QD$gMk4X9;#)RHL8iHpA zSt_wlz*o|R_n;t@mR*nngUK%AXg+< zd+0~E2mttb|Lo-W3@&$tbsFq zC!3&R6`60cb(q6ZmnLO?SMMaAt81tmgv@ntwKoUZYAMADAA|1yV7>0W8W78@{C8&! 
z{4A}<44h}FJa*Ih@}>7($GkNuu_d#CVQ520WRgi0GmDe#lak&xJEDlpOrc*pDJM-Z zm%KXz1jg%a66k3Aq~K>xi=FR!hA|ri%dax6q?6y0Dg1iiT1L0I7wicwWv2Y2s;+o} z#$y!lp^&oAH2~tZ$)7R=U73!iTf|la7$)&tVU8J!XNVqKV z9Zb;>NUTdjIQo>D4b-&*%OkgbN0-a7QWQA6{Hr7yUtN(YWeY>XqNH`H-2qOsBmjHc z@=qL}XX7#c1iD_geiWSp6SZVxo3bdn1!D44m?BKoDI34@Gg7U~@?7*GM!~=c1v2Fj zBQbIlN-L`bMv;#4l9pFPrpO zJj*i>3fe~0=0b1{M=>8QMopQ<0t=zsP@4GhOoBgrwfL+$a9T{LR*K@Lg|+Z@X#jIh z>HQDZEw!flU53tDSCOGVXJX|=mwFvTW_><()hHKx%LL0(-Nxa#*j9fVu5o8m_jLbf z7j!SX_g_!t^qzwCS??y_hPuo7*_?7!Yds%D%ntH#e5XaDsde zC!#{tlwdD(byV!U%3XQOa5}gH|H%kPB^|%og9`l&z0s}e#9&=tH3F*3%(?MLql1%w z!#HX#P_Z`T(`B-_&z-O`hP~R+SbIcehH{HdO*Lb&DWc${hF>j3WQFjchbFmV<9~Vr zY$Y2NKV*=x=!85RNtNTMR;REt#51aV8Zb>MNhy|Mw#{atj_ZzKP$T%r*Ll(6W?4{| z_D_#>A?E)8nW?IezpCp#RGO*YY|sU+1U0X0vnbhKQ>bZ!cB%3AHdH)4zsb{L|4T`@ zy6x{TbM1dS+dG4uYW&BGC;Q)HJars9C1Jg+@1}_se5rglsq23#mF>D4Qe7Zst%Idb z5%X)rqf!Kd>Z25tP?ed=weA2;LX>c@RU<+618MP zlEBBVq?PU_=z3@3--5Tj(2Z|fDUH9joQ!}HDA)wua&DdOFZz6eJ>^wpFhxw*qB*nK zGuW89c{BBZZ6;N9^NS7BaB^l=p{npJXD#ZKyEb&d&BE*QtNJSSZt_xh4sR-~XmgLO zS2|Exu3mrD?SrlCruG|`MN@^w72ZCA-(ZzL@Q{{X?M=;9SG==cdn26m*i^3}c}p4i z2)bS%R$6==VhX%;k%WKXZ+`>2v0~yI16F`+IhcZa2X* zb@X!hs;yO&%<{EpR*Ikk(T1H10CEST_@-Je$!&AjkzY9O=fJM=#2XlxO=jH5qcg0| z&(l^c6}u=0HqK*nDps?e%PlaoRhCv?@SA6@X8jezNGsXftCS;pF;0b=;fT-nr&}gZ zvn-03UoI$f*?yb+S=a~WgpYYF@Z3ztHCu$~>S0B`Vylh3vskQg6R3mnH(>oM0KGJu zFEnym;q432!^1M5`F;2`G;BF`aqYZ2y7=M!VVU2)JwCrE6@0t@{n6VxzT5_uIx?Lz z^aZe$pS!OvbP2=dPy25_9F-yY@?B+$!+aEF=Wky|i-Qo%*7{<#s;T)g%5YsWGO{jT=(UtPtb%I5T3gdr%I+-oHcNBO?5 zy5;65eA|O${Y&UMw!wwfoMMd!)pA;!URKFzAFj^4C#b|z zhs0V6R}xl&uV2ma-yE#}W_z=1n0sJ-siomR%{C{|kG>%1Dk0_xn?qJD|w8{4=8iA(ywfs@3^O<&^~DO{IQ z$7tN^8LAvbUbiq81bYEN4FLJI6fKOe#sP9*l1)Ce52WC#F@Bjmp10dw++orpW_)|j zrdId8Az1$@$Z>Opu=x(J*v)`;H`oD1+oLjdFXkEWXx2(PV9bkm= z{V1Knx!CgWi6y_z58Xssd5+-fveCSz@{eEDgeJGfM&1=c)=?ZZH7+v^o?;;ezP zSYVN*YA>RAFtp}D7WH>Y)*`pI&x?_gAZrN7yrK8KYVw5a{1x>=&XwD`f^3Pf*aNW?EU&bqT5nz2dIu#W=-;o z_f9#)HRqtRgCFt8j2)H6_5D>Y$*O#n@;a*)QC?$C=$GsB(RaFke*W|Ov%`&L3SUZN 
zT#d}uT;-31dgr3BD*ElcVe_ZJt_^+9e;vAxPtGs)-@d&(ef#0f@yU5RngSy`w_<8u z+PEqP+)G#&u<^+ouwZYYlRP~{5!fu4Q1yxaoxT(fDigL zBFREPRblDsFi8o}v!*C=Q9|eaQ0;E$vNh6}>ui7E7|zAwHLF0V`CSc+c`FMgvpQfR zJVOa$?39ubs&jM{3e-3`g#3$JTPT*OZKB1|(&S*NcqxrVwG=fWrUyK04vk1ih)lwP zVkUsvoMpaa?cS=jwdBo#+Q5xMiPngX!tmgDj1$bIo?-qj;H$YUC-Gb32n)&K<_*%# zQ&&4iw?HFUO6`>@N{xlbiZ6!T7Z+KUv9>oUT+QA4_EW{#(Q1A!a^@zBXN{e6j*atv zh!X74YiO~~r8H-NMZ>P%lAcjQxa9CPJCkFAH!0HQH@l#5yagp069ldZs&b?6|1*lwJG_)$H??!ur^E2}M~9a`kCJVM4HN8>DF zh^vvVfrRiWP9~k=4422>;#I@RJ9%AAS$;>__GB;=qY!8D5UA1%waD;iB(E8A$j-cK z#B+V^vfs!hW(z6!(w=4T9S7BFfXXmTFdzRMSRb(#N`TLJk^&F@I43pp>!EWuAH`Bhy|-3o;*HUw_i zMP9ouz{2zqFg!<+VYt|?sL>pDweG3WlT!R}zYl>h0)?$%TO_eXhZ1VNc7PA76L zI27auYcC5oYQAO+9chknMj|9-rfZypL!hr35pO$rn!F)sOH0==_j3s5q_Pg?;dm7> zFtTYFb2hqDue94RQ_Y^77Y)k$PDQI+LO#e>lA9iKzGSpsTdXt|#vIwaR zOH}+dTn>ZQNaj{3-;3kL51a zU%&TKzSpx5{~w8YOeXi;2Ileq?VWy+|8EWYyHEN59_3ky|JO11?-i}wkH_DOr)ui_ z=C%lyjfA40EvX7o=jA)8TgTj({?OFZ*Sas_*y;#WX*Wf6M>Y|%3vKAD?0vZ%q2gYx z>#wrwnr)_qsGic+F7dSRf3qLgSWK4N3Fh$st?j+i{ZCt4FZ#Pr{QohYrOb3LPoE78 z?>C0CKZ~?pvF0t$YYi3(>tzV;Jc$n%jRRw9Qn9!SVa8hWygk;~wyrHI`YKC6=;Sz@ z7pQD26FR}wKp}S{XsfN_o!M(j;@3Dr?3~jq;2CvnbE?SVqp>GmpBJ4Vwn2WI&OLT) zTPs{+Y3?69L=ocU6#H$hxb>XoU=)#?5BP(;HkmElTCrN*oVtGbIN^xSV04ZGl7x%5 zT3g$3oNV{=o=B>fkMR~Y&f~(}FF*;#DfQ?lp8x1t+HdzPu>Y@U{hPc0@9gdFmF<6n zox#)o{}|5_TlzA=smpGd-J&C_?e3`p-`=k|!cfLRUA|P0aQ&16`cVV1&bNUc-qN5wF14 zui#auF0i3c!h~|Hn+QbRoC^DE3c9Pg6}b}7QaIID6T>n;jInYuFh`%7iX!Bt*vXv@ zH!`Kk=RN0y-+Uk|70MtsK(`!NO^5mxL1I(Q-xg2?RK@yayP{7)OlyDbWsJ&1%_x}x zorRYn4m^~N>w-MnjyK*P8EB}MkM24uWvua@RNL< zzbj}f8q1@-oq!}&mjpk`0S=BuNP;;Y-Kc`w?|>)E$j_ST&boHcF}Yj>TO z>Xd_SBxlrOPq_mOh2VM5^ML;y@Vwq%0qb@1GTov7-DdHHlfb;#Sa-8Gd2Ufz9FRjc zp6qr68QR+cUg&ZwJt6;mkc0g$=c`c5V?`)5%l}R=r-0hKzdkV6|G&Fcw*PGn`cLxz zV?3>~(SmTtLYJ4@hSci8N*OyITHB}zmo7yeFQJG$TT&5uR#rr$fvI9ER|j`os0`;R z;wD!)@(-^`mV^SG@`@VpXgN>npm+s1%}hoEeW*trM{**oB!jbxQ(eCXQFmS^XVcuK)>I$f;=8&C2U<`RM zed}Pf0jJfwk!xa`3kb>i%{ix#qlwPl!St^(QaF{zLK;1OhBP$X4!MaZDh%NsgLqjx 
zh?Io2w{+)#zN`Q$9ZBxw8<>JA3a(ieYg2sTkf1qM>tE<$`rku_w@i~UfN&0%XV#HArNZMW-JZJ+^~f@Sn`m_^-9i-x?~mi%CxaeCXWpTNbUKy zLOV65&^9*e(W?$sER0B1z7=Kl=UN(0PS*hAp=7o!K%&l~K$Vx~ev#)cygm)1ZDbVk z#@t#K_e~3bt@7U#!7xIMc_t0Qy`{i;`hS0`y=MkMq>Tf7Tw8Y!nXN^Fi}O z#KLjPCUbxek-TSIsG-_Ffe+Gf9UVe>d^!XtBtaFCY-!O&8!J>8&4FqC55caUo(F!K z`F}CzkGFz3pq>8@`aAudiv9QL{MX|=3nrNQ?Ns_wZmHjfZx_e4sJ>rUt6uxBExEWY zpjiF4whR<|ThdNYuKci8gL0y`rn{MVetXh+ZT%U|0vdm`NLREGpkSPQo?mG zMPU}%AW@TCzdkcpn48Do-ep(URN=avs^C%J-0i4vZsc*$u+r5xr5@TZ z!YLJQzq6+n_HJpwMRxJ645fAai=!n zk@M8tyi#L_`C$~rDlTqQWY4u9+xgJwG9OB!ta@=T;j>Qg92YKq@N!}(PKJ4ECZ#CE zEJ#5Xr1GpU(9zOfy}?dZ8Z8k(?UXl9ecfa?^waZKiR+iq;K-yHb%li<&{RQwvz25LXiFBz*8JH%$(vDiIx_)u5lUN>r5#rE+ zVu2oIjY5)Ew+8yeQKC{o6pAV?r33i78F$PDTB`oDvdggoZAmC-7=Zq7DR!j%wzyliR{hd^y@#pDjrztTE!sFanQ97e zxK{{Pr$#5dZye&3Fm6%{)p+jfF$tdKQA3=kxuz>PkQ#$n_zr(5kemCV zbWvxmkJ!sIC>D5TNpCIjo6=Xq94pOS6mn%&6kzxD2nN@v82>Q8V;hE6uGY*ZijNfc zDsKTuvUp@NEx7wmy*gisba5a%SpW{Fhef+=yu{80#;YfxyeaRGiT0)F?f0LfGE!UC z*}{vlLIB=3az3zG{!b=2`J|8EuBHUc_5Z%us@(sx_jLa6QJ$qU@@PWw6nUhSe7g1Q z{jiTI)LF0uAoBKCn_ydrwuoM)lmv(|#MC;387Kdc-w~yRUPfe6U;@1m&3cT7B;()) z(nOU_aad)1K&Jl+U@%2$ov45P6o>y%(dcM~63)OE!wEDz9bQzHVMnGs|9P*6)0-jM6 z6OJxn7*g?G|I8So;>Uk7#5h=gCP2BQXpAVL8_)mI$t@?JZp|PrO^!bG4Ep~ll%fcR zr6Qu#%Th6hlA2<>#X_TOJVq2J#R((haj}@3kV~m7w#}9N8$@V^BKB%9+yd9=mR)`( z33?UZ%Kr`r{eBE^0t1d`=+z(uG5mDN@MrXD(4Sazh&=_-e3C&V&%5SbzwGD31po$u z9CNyp@Y}DG&p0iNF8N2{+hvTC%LpYCJ_Up8(zk%b>7^VI*eZRJq@sRv^ojS!s}0-^FdY(7%P^DBlW>4C=Fe`UsE*Ee@^gN|CJ__5^OU_Z<~gzQ?85O z-FN55N8sbY+xzAJ_+1BQhJ^U#6u}Tt@B?8SJQE_iQ%WoX0WT?fAM@xH^N;#smol+10AQ=%2iyHEFbLBoJd(;O%I`H69(H?h205gn_^@mK zE({Hi!MfRpL`5X;w)lU*|L*_z>;J#uuk0>h?C#feawk~K-2{*C(qwXn19BH+^ltL` zj?E_G|DW&TbmxvtCU;~qySqW7c;nedftmq8pHfT>bJWk@i=_9pQ~v9#-m-sh$?ajD zX8AuQVTc*cWM1M?7EaK86T!{%|8DJ-&wtBTPxAjGJk=i*HH3BP* z;j~)szgvbkWlo)sPd~=V{tV(sN;eXI_f7Gh_64dpCt|9t4L^NIV5$4UyZHcAnkZN} ze#3lmzqhgEPx!R*e{&+~9x?y!Gl&0g_4ms6fA(MOZa?w=$9PICLtd(wvh=7v^rwmU zpKC%{CBpXi<@{#g`&Cz)>$t0lB&@4h(^tvqmYPF(VSW~G{%JiS7-NdULrT(v%89@= 
zaHy~PVu-8r2ZtoMMs!n#8Ig>G!~KKH_dgw-ogE(@0YbsS5AT0I(awk0?|(iyIx|1c zk1mc6^^YHq-@es^5BQF_wO(Ci+4Ysyf^Lbe>gn8xcvF-_~Iz<;kD|aa01K&&jVwyd~V*DvtIT^ zO3AM%;3%|j0*3A1Dz~8h8Iqgi2GVf<^f+(8q%-3`$=+qgGx1d@X*$9@pDFS3`1GLo z;`I2?naeEMXCD~S8zs%e*8BWSyxmW3pUa)}El#pe&EK;$jZlmd4kLZ{c2m(Y0evfi z)Kj;nHhph8{HcI_+I#=BPs{#43mzc$>n{uFe=jQWU%M}!{6CNLJZk<&nIgO?n)tMi zzy2XaiJBwY?%3VvouvYJYh1^Tos4k;Bm5ar(?|Yd;Vz79qWl!^NQf$tO10z5i|3IH zGN(mbAd+)zU+ziD6GEmrQ8Z-<<~8&8Y{&kmESh^Ra^Z_;rgD| z$tfzgbJgV9>DsGI*O!O5=DeBzQxdI^^Vfam>3@UW?Q;C@VC(7r??-uR_`g(W0*VB` zEb#rh3;H)I!K?*#t1jucUp2E;3}00LL-6;?+J1l&8DDJ1`6ktSy)q|uOrgB6!$wp$ z-`QSY%wB|P%N4qjA&Fvg&aw!x4V6w_hf>Zr7kB`sSZgDSm4AGm#}3Q?Cd`6|Bv$2F?7B6ue7m$ zuPEog?fx!S&lws^bxL#nAy7zj?~&D~Z#n=tGLfE>1!OOas>RS+jI6F4x~%J?Y`)G$ zzoI~>X&xvX0@nFr+reqcExYV|KZx@4^tAB*(ses4^f8D3_jh-9%lv<9=gI%~D9@L< zcQ-X0{TH9_dGa46vq$Fe>fHPL6#gee&PA)`(|7LKyV_fdl}Ow z01FU(V2IXo#)_R@7C(iI-4GhO4X65DwQ*azxr1EFt!a$HB;1eEDLlzyu+wjArt#dS zz8$)S7&RG$F!eZzqx{IQTSXak8N4xCA!##d-1Y)DPU=;P{{LDp+M=7Ir@|mCJVE?P+d}# za_NnE(IgLXhv>Jc&^|p&J#*zhsEoC%h=KF%f0g+Et^QN|-=jSB+k>{y8&A-j1fa(D z?~02L=#44C^Z>%wV>>CnP1B!z=C1#kB$yNW8?OJYoxxUR{cmqQt^db(>ev4}J?Tyc z;BvZsO&2(s_yM7aFh3x1ugn^BL-*Ds+rhKzt!{ZPR;R!fRtW)~2WwPFPAUT6C4d%eKC1YnO6vO-W)TP3O+B zR(-%#9c(791`tvwF&7J_s9qz<0Tv||Lk*9Z%9mt{+ZV$Hr#f$;=Kg~cn%ZHZNRkas zCYI8iSBt{qw$e_20Q^cua?kM`lu1HUXS_nONuR9K+yKt-dwpjig+ z2iT#1+oxs!=a8du7O|C`fb;CXFUseC2QPNFpYlIE%JZn_e;kskoCAVs${g`QmpuQ2 z`0Rd*5VF=w{EOH2|3*0%su?%u#&%2*3?p?L&jVbSzx)OHf0QfnD_!J)<=(H1B;K-J z*4JMO1QvL<6)&Y^hC@V`2E=w;(^v~Ef5Je7DuU(5#Zl>m;M5_l%~h3>u$0PR;l*J1 zPUEUfNm$D)X8E~IGa9Xslo!8ZG$(x;iW4 z0;7^?K&Pq=li4sJA$s*}{bqtV@H~|^)}x4}BwCIqT9Q9(o+VY0VfkMr$v&CezPV>1 z{=Z@>h`I9LR@MLabpPAqJS+46@+r~AGmg1&@BWD&evchMXFjOQYoIo}-!-R!#KlPbqpuSPqQdeK5XrZl&??ox5N6olItrMPk$9)SRlV` zLP8tcr@6QDW|hpja6HBd=C_)X^siM~^XpWAX?G;bpyaSR2Q_}J}{l=nuEv)%|N@{jH& z=z20yq26bdKd$?WuB@!j@5DlK{O%rz#hX(Z>#Hg{K6!`SH-Wuu)hV614Yv?XU zDlEa$C=weAH;6kyGeo~(DWbgp`+|w$NkS-kus-I=|GV4e{4YB@+fVub9_3jBr;u|* z69zZ|YU07o6eVDk;V8t(1f(#yh7-j6&KkIwVkUh|@TQh3Qi`QGT@x! 
z8ImFS&U=`Ioi&i4iMm;GJ*8-jKcP_Z(Z6o^KrB;05?ND#1X4sngcIa<{KNChb51Dg ztbqd($0Pwi9h`#@Q`Ye(nD^wr3jB^g`b>NBU-M!*>4|^LPc}<>c|#)@TxTg5;|Q_N zcRssGJKy;uc-{HV=W*Kk?*Dbxz)z53k}+_6c*HvXj0Ge_9bfypIzA2&?5XmU{MwNh zshodnOTG3fum7{7{lj-hejGkvAFb2$8Ht^$%U5}}URXaE(TU>KTf#pUg%%xP(Y`7|5(nDj(>FA(rCr*4*pkR#DL&iveAvbJ2M~%d; z?1s4*^3}M~35AlfU;<1iEzkn5*q9(l&<%*;uY`h`(nC9)ef>gppkIq$LNi&9@HGOn zfxqQ%1Mi<;w&nNz{zf6)Hsk^niU4K+CLNrJkwA_|5dtHM;I)84x0fI!0gPdi!3aQg ziMs@U4O|egBbo>6`$-74{lS($*bw-`%)+Jb?qX`8{qq&r62d1FEY`SX@!6vzq7B*OC9feNYm&>$H*h?i}tL4QTcJ9EHIk9myfBYv| zJVI1VVmf=cpbRWo?;VN7` zdesCM{3PMZl3K`CgezyH=bS>0Cbtbu2|6}K0n=sMS_fBOK$qX9s5=DRGa|YC2f4p> z!Pho;C0r4np#(8@O34VdmIiNiQ6S3#hB%jpJ=@?Cd+8g*yF>5=bcJXyfOKPBH$hiu z1>GUo?)UrPE9l|`a}1+H6v5kb6p$oj-67cPZ-TClbIspwi??Hlql}`9DMf5bqOd!Z z5$#t%FCei+EHJ`5aAt}wG{XB1@avU?t7LU+2CPUBC0ktsTwNQd*ByeL7h9V^y^#)R z`)cg|7Nnw95XD4xVU9U8fp!O^O^J-bsjdQK9k~TDh9?O~NyuElR#`#HSSX|~hB={4 z>qqo+EQ*4>=$zKURY`JM6h;Rw4kz{_gS^ zbKqjrxuY>~u_*-WL^4AdRiG)rwKdw^8}AMJdtn%Czt|g%cJ{)*zu4K@?uWbE{TI7C zqw(G@+}r7g<594?wX-|;`yhO=6>g7q{*K1M-jxetYR!Oir&JWx!NPNfbagyFA^emg zhLV=WRjpZsOWUQJmw*G8zF1`rTwhvwaywjZqOsOi?p#4z*OK|)n&Gmk|CR%<0GD8N zi^C<8v)xEZ+JJ}|Kr$OyXyJOk-tveGs01DS9Hn;vT z>rp9Ct+K^I#yE+Msx`Ouhk+}Yx2Eq@(icHrx~!ee#5ziP{Eg|BZF%i;QH8=hDr~8 zf^ixN-W|ZG`w)v%Ft=04r%e#&z{;W$g_4V;6s0H$#h++~=&c-s4i3;ZP#=6?Xq-jL zS!zDvWCA`0ijuX7SQ*0ie?ABMZ@;%43bmh-C_*%MQ(Rj~%wu@M9N!1LVIGV|o*tPO z*xJ2GhC|fTAA0$s?yc$mH~fcUdc1W77w~!U_PmI$hVm=HWzXEq*NYEMKlI+An9y5$ z%Esn(=tVfjg?8t1eX(%G;ba+Y|%xaTX7yu z{M7=M@qH7xWi5Jh;93P=k|`V~m>*cFrg>Av+!20^??wwG#i)!KC^*ugGO> zpgkyD>K6Z6=Vwo#W3% zxGc_s2;#U>{%X*FgCArb-8_Be2Gu;euJ_ZQ4x}8UTBw=Fl{|YbnV7DMa8>4U9=;w5 zE+@ZKwY@uV>d=+87sSo3TEV-{5Db5&n4|YefEH$CRk+#|JO?iRQ=W{g(S)`l|8n6P z;{-k1K=;q?E5P{QK;H8vsB&FIes7GOyzOvD2qmFk6WP~w*fMp{P zVruQi6>>9hJrsT6M2Vxd4E7Ej7g-l(^0qNuc2npait7`#YpUQ6p*NY#WB?~#tOQ<8 z0FF~iW^!C(LSrb}ygxH;8;;z305%7&2=)V@SqO*+Q*x6WLJr$D zFo|Y~gG~^U$vYrIS%0VhJ zzvK|T16QMr-Hyo8%6%hTwNB9aaB(;(l)oDI+XH|3=#)mdYMr3-;VL>r@4!{JyNWto 
zg{#){IUlZ~7j#u$X92Dj<+uu$kp3%rG58zMn(@8&iDkvy{YcupIdTMC8A91mekDGE zlG$Q)@4%5X520j+DN#D+3{tG@yD4N0Nqx%1NUW+xB{O-_<~1c1Nv#Z5Ekt8VVgPeg zDmw7NIZ{!f`=`esBmn~?0UwV}_Pq&85QQ9tuEh$bxHlX4 zgBQIul+0$3s&2j6zzYaPD$G`HJ{*LILmV;RqRNb03|Tqq7gL8|(R(Sc-CU1#Jcf$Y zjBbHCqbwCm=?E5;?Pj74t{`Y&FIfopWD3d+mEul#~Y;anBI_*6=)&GEsls0q!DHpu&NBA*us=vtM;7O?eK;aVW_ zU{!-zAt`@rG{h+h7mR#+7`PU3j~KWX4AFQHxK8H&?QoWPo@9 zF72Lbv*DP2j03LDZc{gotNR_I*U_y$iX4tv)xnWsB!qnIjOErIN1xQce0TDS)O zpzpu9pU7H-t0j=V4A;Cs_7(Y80j|@Q;PonwU`axPqB66M3Z18>O$2QUKawV)6MjzQW) zW)Lok`S;*qbKwdx6BKnQ>GcZSf&GBv8RY6*@r>Y5p-6n!^rjRg6qcY{N~oR-myELh zfYrH1CIJ&~)IolkZbX!1m?T9&q)}5q{NeaMlKxz{3IPRoz|e2oSsSNya3v)B-(o!~ zLSnBei3O`==bUB%&!`Km!l1F5aAK~EFo0HerJ&4qe~62KyaQn?-M^@Ur61I6oWwK-&oek(S&d*wATozQt-D%h%ugk z7^XUCwaPzDVgi?sYSwmXOV5wvYlBhxJ{kd)7W$RIT?Cs=02Y|N1KVrM?TxM>Av92s zNHN=hM3KB3!v%FcOCrQrZGSqTOEs_5Aj|+me8X3%m}Jnmq~tt+PEu))$7!!*GHV}I zu9;rxQC&>k(^u6+77=nSNfw&`XgL?005U_A3aLtMF^uEH5V%B?^aTk9v|^;%VkonN z(ccUdLa5oatUzb7m9ZeYK$gfIV2WCaLCA<{#@#tpgg}wZ?SkOSbf<(MTWQAa2{S;mL|VHHA!bZa z;W2;C0E`#`DPhXbB^aNC_ZIBGeJg(|5OE^8uF@B7if zduzl^aMcSYbCyI7&jJg=6uu|o>Y49Sw3dVGQrbE(4t2KiXCb?o|29sP^LH1gVv7z@ z!ZD1Pzp&n%!<8{ae?;xaOcY$#=++2x7A^@vJ#eC7F%$)q__Y9FIb11Z?1s>=4Yqs1 zWg1p+nO}2YQ=Xlg8A2%;)U?T4aS8*a0 zT=S;WJ$G(yYiM@Z-IcCtnCE=DYr%c{QwNulz3Ua{10|Q6Gw1pelE&6ow2T(vN*X;+ zPJRjvkX!@GZn3Ejg~}is%ZZsX0GMDjQw@(Bv7uBwfFflHxzg&(l^EnzBfJ-g)hyLk zcvVz3V*C*Rmph3%zBz(H%goo%fRH@|xM$z*AGCNpR-^$5m)-ta&-g32n7rjlLU)$m zlxB55ZKpQMr%umftEk`3omk8U$s18hX7)@1+zaOn)GO*lG4W1Qr9RFwvSKA|8Jvp2f6YcLPpcg};{&wQm0W7plZQojTK zoS*j5Mk;kHBT~V~B}TX+_p7D^CwbPG;%G-;bK3n;X~8+YJp(0-h58MvDBhTVZv z$fwet>UQ7UI=Je^h*KOaDHYYi#eylSH(A_)bNS6x`E|bvQn3RS;hJ)uu4qUxaPjE> zWAECV+qRK-{^qB^D9y#bF-h5ReHl(KH%^+iZYNDAZhD=`bQFk$B%CQi1xP!(B;Wl8 zyLgeJDB0~?=5E4&Y>{{@9t$iMyT3Kw5k2#36Dw+3iu}r*f>~9p1h*=mKL=3L(zV@E z+}uC-!+2-jdjTYvcrQJFqK<4ITKmUQ&z+wUCB3t6o416{;HJjk0^oe(O81z>-6a4d zd)ukCl`ehen6i>r^UY)33z$u>Fa?e_9_?1E5>N;f;JYa@WitVH<8!7EO6^uYUIzOM zjl{m=N(&EN$bpb9A5B4&1$l+0QD~nkh-6y8X{CRA{0D%FVWUI;Pd2pN2ktEZhoG~? 
z(&P(wrN3+eF5W`J?p>jW%pp7W4_(@B2g$yb%nnd{+^*&Ab~5{qep|@gW!S!xOo)X$ z-psc@nRgiUp;#5eWbTYsv6aj_^yU!!?+}^a z2~`u5aeo@gUi(iEnL`0KhRC$}NDfgpZbjz9hj*Quz2Ny9WQN$z1K1lo$<)H*0I563 z+_X{|BC}DbtDTB^;V%jy;F#<4elj1A9}l_#^pI&btjj)NDd&F)fKn@S5U}F5Rc4qb zBjgqCehI+h7^vH~UXY{Y$1oWn$OGsVuco4@3Q|itoADfv^`g>J z@q#%8VF1Oc_EO{TN9S`x<{os4rfw5ji(O>iQQ{o(Y{R=s2S;Quh<`l)jV1jBMbyM6NU9&5vQ&ZG#vBbcBJu zuc=H+KDE`RVLF57ml=(L>P^%FyUg5A8{{?y>`3fz7abfXu?w_$ftV3q7Y@NNB?E+f zN@nbaEWuMHM3^XAuoGq{NXS!GiW|tN#F7!y$7o5(-mOYn88aIS`4-~FUCq;cVd}|z zulj0qXBx8t98Fz;SK!+i=KoTn%E$*76#ab=9(@$p`HZqC_>v?^@C%|PuOLdOE)J~s z;X1#QBP_b+bqTTTE_J(|#&~oTCTe)0lhA2i9G^ToZGSY8DMuMOcCSBTpmqT2-E6A~ zQOy;hZvUl7t+TLfIR+M70#Nrl{Em?`-6vDFE+q@{rd=Wu-8GQd>uh6HQy;_rK6%Av z*+!JxTr2};jr7E%o>#2CC^BMy+s)Fy@kY#9pofBwfUCIKL9n{m`X#+FDD#bCGS|X= zRRcBpcj^!M>5{6N$&koSt1ag|J->W@cG}{c0FEWMUYG{RN>s#Q5`m>}AsAv^N>(?r zBHv%W{Lu_VxdxUA%xbtzrbqiJRqX7Puqqb?D{#mUSTaw@w@gZ2A4ca8M0Vu@moBIx zQ^uDvEyJJBH2~@F~pz_^U4l5XZ?W@GJ4$AfwLZaGN z28%oPm+OQmGB3`5x`auUxB<(ce8HA9BVXBhwibK5)V$kF{2;WqI=Y_#u0I}>i5VZP_Ut*Jw64O$7#Xk ziD#Y2&JvtRTn%=auK2;pHxRbk^_O8x>?&QwgOhJ`CHjx8#&sNUsD%t@yShg>(OAGA zgMRgB<9DRlw+q&;zv0QtUwhj~)+A;VyV;4kqH>(;%p8w)C-|mz*D66@EghNnpF|*E9fT%Vj!SEw`_(I z@(g`dE%SAXGJ+Q?pa49Ip$B#pLJ&yfrOgWCb5SHwbWSA8#d6820`E+?vQmSnf||&R z1*3eVyMz+SiYB*S-^-?~xt8Tx{%g9gqIOAo%o-{2woNZHD=Ah?mTR))QbIet`$rN* zRz~c)o&}-|UGZOVi_ggQadgG2BJ-}e(UR3v?{IqWLXi=#5S$?A8HpHF8nex9_@riJ zbUcb|VONh;XV=cihd3hSO_C(PxxK8-Db8JJm{odQ&>XK+fV_HlPAR#nkxy)h$^#*T zX?3LMY%^b&9n?$^R`l^tw3UCzPX6h-v-LAjgbZX!4Y-Hw!97|wFm`qcd992oykbPp z9IWdoLSe3^7UVQ^Kr&$^EKem-bQw?*^bWYIhRlgqez??w7%*iy)vOizjM)vwj1E~z zfLDMyMj-gS^(Mg0#@g!x5UZyH!u0Fin65NGNlt4&Ozsd|3$mmY+-T@N4Y3wr*iGA_0ab?mY zao9SZxIW6Fu*VYp8Td0~dl2l~d5om|m8$ij^vK4gFP^m2Rz6QcUYQi;NQ-O5x00?F zykf?3hn!i;SBO>{^;h@?}qXipY2v2`!UEj6{37%^d6~! 
zLLpPCcn&~%-Z7+0l{n*d8!5+;p+;H%P5ZWvdz`Niu4EC^@U@!H++%b^F_OYXJ;L zjUpb7GQpZy!Ryw>6CmMOqIDWQVY+IpP=Ikhs6*mQkGt^ynefteJpQDj3ld|%U-AX5 zDpn54FhH!O%9?QAq+!;z;4`AWczWhH!V4@|H^_FIK z1gOXeP{a#R;M)3Mk)i@Q!1$UE2u~ZDJCS=mG#ZQy3scZ5ao8)d?UhiHa-$V0&HHl2 zxQ3vxqDq=y!S99_nHzFUa-T3>FPMysP%z<6y?CDp{~GIhq2=0_a*oRkR}-sj_N%R8 zRWXrl$*Y3N;Jq=-%-0z?#_iVZX%jsfl3(#BgRmnO;|xFk4) zBp3V724=hgS3xSFFm&!*}A{)V^z0 zrJw~E!?bpsmQoc40IqZ*JhxA7JCHpC7mn3n33f2;o^LZAyG`LcykG#elDn@qODCwz zR}d(c9N0O17)2Kq5wbuZr2dR14@>6B5o)orG!5>PY++9hkI>nxr7^4pS&POh=isD} zct#kw$_lP8a6H?QD{SaeFto_XsG9TY9d5z!Z(BdL zXYVpH(!`A-cZ22@hkqO-e?9D8z+F97?&7Ur*U``&qQ4;A5Y!)y$75R&xsS^+hONat z#HHMv2wLJP*8wKE-C+WK4zG9=!H#Zd)eEz7(*SPZCN}k>6{$w8Q_Yl6^RGI3WlU6yY%k$9}V5q-!A(-MLPK@Vm0EZ)eXm!5d zK1PEUGJ=-Jaon%X!_nwbrkO7d0~cdgFIS1|Zbx~=yg zpfwj&UN!}@BIJHEB-^;c4s(Qy_|`_mFmEDzq>v0OafVurM_F98Uxmn)~o9{m#k2MnS&l_Yn20W zS>lVzj4)n;8yQRCss$q`*;3T3WzNQ{dGGiQTY0C`QcT^|G6tE_MNeFz6PPp;^dd`^ zMKHuIgQ88*z|Hwi8(XGNQS@}}wP<~|aD_ru0pLqfV(hD|6B{~TfQb~G+Lf~!YMFZV znNL|vhv?oXeefDl+8dR0mvzHk>KKp5y@yWg8)A8x)muDwuXMejl zJ{f=c=*xR#{O31!^J(-!{oeQw*VS#@|H$+HDc zVQyr3R8em|NM&qo0PMYMd*e3JC^%nNzXC_jJl*ZnqAtF5bT((#<8~6y^u@33WHxV} ztQsOA2{8$<0Z_Km@&5LGa3KlaBoe@5CTO?R06o5iip-{yjaVLniFc+A%&nA!w zX9ffRZyPL~PN#Exc&PsGbUOL}y9Xz|zjY5ghlfYUz2oDPzsV=Z-M@j(CXuKr5f_mC zt+RGp`ObYM1rkC96qpj;Zvfyz0ew1lTquN(98B9V610U07tr_nAf7I_82Y%y5t|~` za+&vC*E#4p-A<l-*)7b`)F(Q)h~qh@dZfie1#6c85s7ZYeN^ zd;~c%=w}vIMg7ka;)1u_FzSPoZl}BlNwok4l+F5}chEh2k4x)DxaHCyqy!ORh|_=} z{uwcjDd~f0w*kX2`Pp^eI7bZ+ahKsxs7I$@2sw5&^#MmB3W1nF0n8NO;0}uk5EG=q zyTW(C6&fK11O*czLf&t;WAe3qC=eIz1^^?7210lmyF%$Emk!^h>nQ^0GULq!UErvqMu~6L6OlZwDT1mAA0BP4+rc$@j*xP%H zLWW!@kOv?KbR?@NbR)u}Fr-Yr!31=jZ(jof1)d^cUI4RycwjgKh`^yQsrAVt+)=-f z_njoxr-o*glp#-Vv*vBw8vt+s$4I^@?@WLhlV7(CjQD@`f$7(=(`x__`q3DZi<;mx zkz%0w+Z1NU>6sc`N0I77w)cvq6Aqx~#Uy&uDDPoo2sz3;mxO6FT!KsKQ3Go;v@Mt5 z%-}Wv@agy+nj*h1HwL-+qI=hmJoF(J92APJy!6llx6u&AzZ+=G5a&NYf$rd}eAvV+ zM1AmqdgzieA!lXvH>$%WpGX|MD-=+H$_N)?pNcX-HAtks%@U^}kdFc+*sKiZw5KOc 
zZj{D89YWv8hr%~AOVjBs8o|gHS7?m6gesW`kPG8}R(k@f+S8sroAp4d7XwVNSRadN8Ss+C@Rm)7SL17jypo_*v{hLx{ss#3qS{ni}0{wxJ zak=R=8!}Z(kxaR>S5)DkotrUO}Q0@!*XO*O~n2ntg7`1s9n`*(=Oh-rvp21k&J1ZBM(6TTHw@IMd?J74=agKmvMDC~3%tL*yH2u^J21>4k3?0WtF-{&BodXfUu=O#=9&(}eVb%{p84v7#6d|6T+U!A@#9qV;dsGWDP_FcJ*0GIy%~VU-r5L&+V`OD*iu& z9JTavX$3G`YZF)`|MiaZ{QuJlQw(g95ms zkhc;T(~Y&^lnYZUfl>psE|4;zrw*V!aQlW=kD(V}E~k;v&F$$^Y#@jP6qt_)Q%r2djnQ2I2GT|`J@(7q3&M_x`>kp zQ9ubtLRU@C0Sdp{BTS%=|IjjaD?TDX`WHi?k6oAm;sOdZihT26A+C|2+=V_S<06iV zM^GZ5A5t%_snJ*jH#Jl{E@g;v$E88B1taU5+Bv{uB=lG?1O+O#p=9(~5oky~FFE}g zs?(o3C|7dEk_nY=oR;>Md2U80^Ose35i;t4E#InI9RZAI7x6-h;|YVN$=pcj6 z{iBWUk*j~n9f1o(#FY9`fLtHqfXCc}CG#oO#*!bwjo^J12GmBHrEGj=-+W1F(bcIF|lcKm<-AK%r=5+Kg%Sx)&Kk~5Hkomuc{Xg&j zcYNI0`G0SvJgxm-&a#~Ozy4~^E@%mAXZ_z<|995^rAih5H=dycw?e+2^?xz{Kk4Q9 z|H)zZXvhDzQOcbs6#Guy9woQ6Hwmq|C6{6Jws1+kYqO#*wPsh83_~8al9{zM!- z!h*g}?=BghVjqpsId`EC_3ahoSvlfbo-?aUvZg+`ipVKHB~sy>P%b`bE|f z*yF9_D~{e_65Ut7kHXML0U`qWSzc49s@3Y$Aje{JMso0?m}n4+*;~wv!G$X)y~YYV zwk_9hdQdExyN(i8!eRw?(c@Yhh&0m$7t z1)w=We&Fy)dqU}LQ|>Ta#4Ws5KcaBVpofaJTXzVLC!*O0&DK$~{n9D|jy3(`g2=RQ{fx1_I)8V5dVN0l zaQgn-dJTZOAE+?RxOQfN#!xP}ChoewTPT;H6&dsl^? 
z>AQV*%=SH1-tSHDr&6hM_`5U+pj-gIHrqo?+I-S{-E6r^VTuc6W2ITOBhK1Bb)nye zVc6CeX?dGP#Nm_77kbNouZ-hmG&jCBx>}}Jf&51M=c`6EYaUACOP-vm%cG&LSzRe7 z@mUuD%gJpj)WIySY835i?B-WYcd~{}yOL#9MYKeY>Sx-Y>%p=Nbr#ZQDFh6ZMrHqb zM*8_sVa6;$zgnD+0c)w^OkV;?kxivx(Cu`ykBy9%J}=31_fm;Nj()hq4u}l>aCgsz7{5u`gC>t^3o&V=n%IfDo$=rCP z6Cj4n$;}3id@4?N);!C}QXnq7)0|qTIU6`-N$5XNjSb?kN4tZUl@DJ2!_Qq-J$Bj5 zIZE!>#obh2Fs8Qmx>gBuP?rK)TtLC&d9{M(U|%yyF-q)EjNW6!6A_;yUzr$9 zeW~G`--isTyXwgi*Oyx~vmlrlqb)opl%dxkwvzV2TZ;GxD$egQ7Y{b_*O_)8nI(?J zztiD6iE^g@DiWu;qic+ITAEXD+bIqToPcbM#3f~-54uO4Z#(KKCIYc3^yM>Mk0C`I zOvXviv|;d_rWyEkFp$GCxQ&L05hM^Fgwz`(+Rh-PObleFVhI|$+7fq*P?(4*V|3pK zFP#0B?vcbcJ{pYcD?~g-b00O!5m2=UFP;DSs#zEBgz|Yz847*q#zLVcof&}Sn`MYo zMvXab#Ge!ltn03xI99hw@9=0VC}|;GMClZcvsqYOmXw2G_}eOr&0bm;E3^wY&elyZ^mn zr=P=R@M@$VZ~bTa|JC{F+xO>A;BB;x1^oZy@E{-m|KOmz^Z(vT*#lR42AGV07qLXg z8;#TCoes0+0iEkciXHo%zAtsQ`tn6K2iXnhtg8V>#?KB#DaVu&CfGh25 zqXOE2Mq_Ues5GSDEea9w5K#-PRhfEHgOyN!iq8Yf;NDPxbeeXid-CYS@v!4SDH;s{6- ztXPA9AmqvRhX}wa#6BGQvK>H~r*koeOzPJD3=AXeduo`nU7*_cQK6tRxk2A)G%iM} z^Cq=hjFbRpS8p|SBKY^Vok01>4?NPH#;jHvI^J6RnuMp?h#XnZFQFj z0nPV`wcst}lOcu7Yf6&Fx$K&j2}c`Bl~@G*a00u{PjU*LdY@coN^WtUT|aeA*c)nZWA>``e3M-clCxX8c^ zeMkT$AmRzCuT`FtPZ_iKcgpMV6S$ioV)soqB|8j(@+IM;$j>zGG#dXK?C*b|0`>Rz z!A)k;X9Ac48tP>XIgbK019X>@{wZ3=Lx9aZHRQci&Kfa4OTLxE8IPu>PMlLwUwM`+ z!9_>~^OIwYKsw_A3`0o^4nA2)w$eYZL*zQz9{wS3k>e@FusZ<(g0f@2Z$+EbgEB;b zy9|apVWKtrhO^io*DyZaHN9Y5Nc4&k430+FeGNmk*v5~zORJs4)5q7PrW_GMV5S8{(;RihrP=Wv*<(ObLpsmEf8c*40mlA@!JK}$d$09SpP}ksi_k;he>WGCEV`Zh{nskq=Z;R+dzc%GlLrFqGMbEGUckUA`z|GNw6s_wyPoD?G%e=@ zB(+Uru7sk`5%g7Venp{>Q7=-5oDTSj>(tJ_8W?7h{q{1>+kSU<=VX#UswDo@$b-_dvr@VwIQ$l3V76w)Qtp%hM>Sh0wT_8ve|$?efsp&s`*w**8T&-u*DGc0<=jR&;t9<(aCW)pa1<} zcmIDoC1?L>G(JWQ{Hp#=7kX?HZNEzN69(=W49l%Q#ZU+wSrB{$Pd zLesQ5H$9LtTmyrzaP&NOJ`hYN!Z`l zdZzVSSq&hw>GlM=$gVgs(d$XgcWuFhhOgUm-d?A#lO)9Tb@+k?X=ZPU zg0NZjc|{fO$!rVBkQr@#nemE0S#q$lD*t1T)R;js%B$RUMrmyKZiykxyAove zzc~43?h@mgI>rM1@3?bP$p3%X-Q|DWO0j*v@)-}4vrXgpr6lQ(#$h{+-r;$aoFz`< 
z+Ed1k&Jz317~we+pVh8sUprCvgEJ82WSj>{O;(f*aOn~}?-Q5c{1-a9&kx2?DfU<| z`#4)O4`)uyAmfQ%yP;gw`7LF`3YiMGZ>%f4hI85XW980MdNrC0C?ejH?=#nF!}umj zubX+EyQh+Q8^e@rt5*W;O{Wsnn@;W}p&rt&4!Q@KcXlhhq?N3?PT92?f7M#=^oANPWo1dYq~-K6?2!6T`xb))P;deTsY;_b6|zl*5!XmuFjcdpgQorlTL0bp1_+D#dc0 zz#As+oq|{$dm~dT6MjQI8eoFO=J8Qr>mjA9W-u!c@MjRXq~6;k9qK<+I@IFCuCAN* zwTvUI&{QsxbeR+(f4=>2eX&lI6~qm-p?wxvry8e_7t_QDk0oFFe%Uu1^XAb68w zi6z~$KFiIA61JUV_UlYz4ZX6P`gIODYy1WBQGf)S&D(gAO2z3k?aiN|>8I0P{_7l_ zN@PV#x?G1w5^^PlW21-`{w_TGrq512w=>F$XCl~*{dvv(F|FxZBJJ4(_rb5fH7sTv2l&K+RB4jy-@mD-pcr!1 za-szpFQ;YjO24r%i%oMI!0oBWZ{3nz{~QLPZ$*DX_v*nT=YKpt$lw1wIojp_-A-BG z{+Bn@Ei!GHpbeH@=?`03b<(;A8gV%}v%#{HJ9QbuR^hImeyHx;@oYFaH;nqA*XaZ+ zS;d4Kwp=Wrhy2!I|dI`gJ;JkO+cTwzaYd87>5(ZSI{9^2D=3{BHJpGKZDrgZEh zbt>Rf=(UE>hr~s!r8RXG3WGp@;pnJClp}s2I(GE3R`>NIk;M7vS6;&WPTi&}1u#*8+bt_- zu?ppV;A0Zqm%a@donjC1)_BV7FiObF_p~MHSn6?)( z!jf;84z=6TOr?yV*|3Yf1`I&s-^_ zL-|ujwV#>eWhn(yV7ocSq%=1w1;jp2Jrh$mJIDc#hAn-$P&p^W{+dd*Hi!I}#6E5> zxE3Xx#)Kw^HT`ybjKw4xDz2zGkX9q7)6(;W`odZzP=q$OU3;F+l-} zSsIs9-EHGwtRD<~I;I0zbAq7fgk;>fMYB7~Jf#fjXjd(x4%v|r>C}ZB{Ajyw`Tfd6?i}*oCL7=m(iUf=BNFP`_Zs9vSbr&Eh5^Y3azL{ zoj5qxyqXi_2YTpvBT>uLB<+6n%F%y7nX~^}EytZ?f+XTwi8<) zG}XvA8>7hg%ilEik_{l^z}=Ml4xiwWXsQq?fC~l%8qb`@9{44`p)4r4CG-wJ$!?YZ zEe;U4n?RwRxUOg<5cuipUE-WSvECOYFui~3#&t-A#AO!}-M^0^BGos+arm>TnippVbmZ`%)09_Yx<;JXl$^E5qhlT@2sTj~?XqC#>*lX;8YkgyjYghpe zuxruh?8+6(n&0v4)MVA8 z9U!6(z03?($Sa?8u4kyZm>^MwLZBej9@hxH2E!r2GzY!{D%NKJ*NIm=S0VD#O2U_?Wq~f{$+?+)5KKS*;i{Ff? 
zD1EfAo=rmIa{)s3AF`h4SfmpglaL0)*~G zGkNUm^R^}?BdVWDW;GaL40R27=xqq5@Lm zF~MX!P(g4d^U*q=%4(~ONxe=-WrQ#{N($2}ey(eOtD`~3Y(3s4&S(%&$sP^Y6p8)pw(t8HbLY%?Z+$c5UhW2z-C)!JQHeORI&Xpu58CS%Sc#M_76 z!{a^mw@U-ba$ASr9vmGWbPjr2Ulx9xkEhIC%uE50VkRrIAvCFb(xjE~U_Z5l^}Nd* z`9BQ?`r+fv`L!eN#cFLVI{!U9Jj}=c?;Pyz|8AxHTg#hLE>Ysc|42m%sbtf!Rqh0xArOFz0$nJ9s(l@FT{qO3JZ1QQyFcBlr{5x+NF!cw$ zYWyw2${8C6aD-U{gs!D@hD8KcRa5GwIJYF)na*Z6kU!&GA)xs`o1i&pE zCe&)8I;I!Fsjs>?q#P+V2PCT$-C6ZrN;viqL*Am^#!jE5(K#7h&Y7SLC5Xfk10@F=~!ofsd$OWhqM zA3HZj6jNYd0X~V%#j%}w(Tc`A32AH89vHq$pM(>VBM z@EUx><-3x(R)ilL1#^!0-|)hn7OP?F7FG_W?<{8h=l}b6{!P<<`|=xZv*`UMXgd2% z`_ca;A2na)?x-Z2--0%EgR8beywZyig&*G-+v9zv?nJF(eYcU&mWeoRPrz5;$XL(C4E3XIGskpS?}Eo*kl5nMi>$k;e8>aEoR&#FW^w z>R4vgQqERUw|ee{ zRt9ZX$oH~|GZ<=*A&j_ldU;`l0Pg$9amc(o^1r`>AQeNzDJ5;g~@FFEmSUWxzItLnkZ*WKcnDPH$D zW2O&@tnjFH&fXUbp_xPdQU;c*Q$qM+g(A62O~^}hKjXm?N94)Sps99H(8pk6rXLz#0=~ps5=sxtZi-kV} zn0Q#*TZxX%m4%fAGF)!VN=0>h6;`uIvxsZ+$#m&f-tI10wN|Bwwo&f z$e7id%}T48$$DGs{KthlgUgD(pbPK+9_Hdd_qv_Kquu$>Hp;q;E1y3y%@P^A1eT<- zt~9iz=NJxncxc7*u8=ku&*SwFB#) z3AFY;cu%oafXHr=?LK}f!Zkdos#SmGo%}U z{&#=|I1%JP(?0ubf@c22wQ2gAN@E$}rmd5SSw7@i4&j!HHd3On_C;1P$+6I9(>9`* z02dl}VNF(@L_=22c48p=AjPk5+`uvy>z>51xGKgoLHe2oiR*WPXp*F3M35BhJdJl? 
zeEfx!QniZhLjN@wF&ZSatj5!X3$s23m= zu8$EB1MCe#hDP|lvJGp2%>iF(Z>pk1i6yHZ?4p%qQ4_^4=Cx#5VnH)w->qn;(jGjo z6|)MOsadrGdcpjfg=%6i$yu0VPf{RwYBpb4YVH5&*-%?0c3x5pSiJw|;(zpdM@PH( zA6qH00jY{j7LSefw9m^8^K+vbnW7}YYKe5Arq~Ly!wQl^9K%FlAtm^8*)n*q9qbzb zc$m9%ir7clL50>pLmWh*z=68Wjs*(Lof_>wl8K(tyrdonX>EOH1rpu|hvujD=l`Iq zS2QAvB+`w(1i|24v$Dlg->0t1)*PUKvRV9nM6l?C=5QtuUm6t+Y5-%{)krBgnDjxX z62l)63<1Y~piQDSK`^ep71xCvB^+*^7_Hb5;Ob0MX`ftSEronRJDKi=G2 zu0XK_orw^kjT8I41kJu0Bfzrz|Clas16Z*CA9Q+!{lC-g?Dqd{lxMjAkLiEO0z#Xq*i+ z)dJ=sWHY3m(sG-Lqm+kv=)>8EL_DcxrdQY3;C)>gVs57x;z|jOH|q?5@{Rft`}G55 zG!mH9>%*81#)S{IjGW}XKD(=8D zx!RHyqi)7OT?am(djWnpTXnhE!$Q!#kX_u)Qu%7+m0j6vSyukjd;4bXKi%Vly!>}~ zvOE9XN_mFzUmT@l`y!zBm$@*}H!X2+fuMJKc>#>5qZ0a>T`9K>m_4e_Mxf7vV2B4P z*urL4vMEe37j(>EP;YBchfP)c!d3jTywBf9L~Htw(ep7Kj12t8r9j+D6%oGAy|=bi z=-$F{^{$$}uAm>pe#8oLLAO_rDkJmx9FeW2eDL@r*a;WUw=BE=2iR3%Q4wo(osM(d z`h6O-LQ4HPGQpz#zthXd|2R51+{ORcN_mF+f8`Y0u1t`afd2XO`&2~u=g;rKC8d7V zDberKU@(kEBg8ak9=I@sE*6%}W8sMr^??RqPH@zv^;OSGEQE~lMM7I&t;AlnLP`DV zeb4S*Oq&h#t%}%B+CAGCu}0sjjT#&3%x;7Jk1dH}YWUDak~qwhB(Ic-#Af(*f`jri1hS7+nCR&J|{y6@6J1*zHq z2{b72bPX=eTYC)#A1nkG6tTIeg;la%tU_ zCCWwre@ZA(DN@-h+%Oi9WjoZW2(7kZDigxS=v6Xu>(D8|ZfQErn?f7eC6?v? 
zgnH=rC6fX#)c=n1@t=CfJN<7vt!CTJz^qt?$wh_;6f{XV1e4NpKR{z=z}Y z81-a{_!Q;-`UWQ*lR{2a6`8`Q!fV^+d3=~)KrD08kf<(oJ6NjZSM+Z)=TGZ&?{o^W zFB#RN^%2G`Y^S4-ZFOIE|Nk68b{j1x04&n~4m)}Ozk|-nF8d9a1Xl-2BpI0srqDcMA8v4i0wl|F%+|A^$I#Vo#(DUMoz%KPVM* zZ-7}FLXPj0U1NJz3~0UeUr!8NwEmBd^ZB0-4^DRepIa%`x?~&M_eV#8@LQXfxO!1|JUY2U_Pv47hl-sGM0Md0}umSrG4WJV7$(k zMIfO7L%$<;MH0l{&Zj{o4*!@5!~sXeU_DsWjYn)y(wRC364u5-D$fhv`@y`M*D;1% zgr~*Zm8*6xhPTBIR!59hC;`M6WwpekY**BG>xWQWeT1$NoBBvB-vO>a++GiMqT*k& zEGz#_5fk?h80_C!+P&xi@btU6*a7m`#DH(+d$KPXK{?0wgB1 zGFZU-g@|~Vj6r}nhvQ}TvZQAQ#B=Ldn)+lD(t6t41ny>q^+zB< zEV;lnrIw`cMr45t#FZ!Q?j6HIpnRH}wR-&TEXXZ(dMEh(2bb04zuK<9sx0>Z%iI4B z4i0zu-?mbot^D_NuD^NFZ&|P3?sNC~waaSjzt**%q%6|^dY!`hKkDw*|5nPgUH?yb z?WaQbyQI`#p!i9o2T98B1sdN^QWx4l%0#PaeK~S-wtWe+&f#{+XJQsses*Kt&7({0 z#OY;}2P@}Feu@tNSA-&ziB(Xuzn|rY1N+0Tr_)lg1(*QUhmA~rO$`tEaQ3w?fh)vm z#9YKPk>P970h6EjGoBD%hg|fZQdX1y%$B+#|Br63=>KuF^Z(dNdA9PO#^8BLfY%C6 z4gU|_;FA6y2KwgxKMXd14gZgXN*nk8NXWEN{|}dXl^fzYW0V2A#*@=^ZNHK7sb4ON z)WKwBzY&ATT7Dx2m5ut17`)c>8_8g{LBA0jyPe<2U%%9o|4``DS+Io5Z>G$b|GJ0A zhk5^x?%~lc{`Xc&UXFud$lKGd5!l`)179Miefd%$XN@A?SL!2hv~aep2)CXPhe98_ zkn6U7hJJ*&W6j|By>65vAGv}u-E07bJ9%f}F%OgY(|08p9UJKCip_YYfidd)n2di4 zJt$BNKY;hw5gVgE=%!D8B1xFZ__1*}0<_KEkU2Yid59^I(ytvihh)uDJPF*GYe^_CT&*>J!lwmP5X=>z`>1XlFGx;uQ zw*YF&c!C?@PUnAz{&5y}OKj|60GX`4)p@jFtu1O4)|90Ncz1ml6Rn zLCjKuR6Bjp{PHE?RG`UcT%cg!p%ILHF<@wnxnMIrLX|w5ax5sDfrp2Fc^v`A;Nc;i z@(E!ssgK=R)mT}i2vtS!Corgi=Ba-NXS``o%vE?QP#G>E@lr0-0?jrJg%P#XzfJpP z%cW$5$C+oknt5Z6joSK`lFKY^K$+lnV;XB1;+A}sys$>?vVvt9Ruz+^CndHyOnxyt zjggV+Mk`26DYX&G@z5BtZ#&<1=57W+IjLcSIKO0ci27Ee5|+HSjqjZT`DM z!vUjI=zUNy99&1E5xxh_b`yYyhxPwPi8IFXWRR%f9DY2M2E@EOZ!aj-( zayAU^PLL$=k2g1$*9GYL4JQ9Z@q;WG0l)+c4E?vrhqG&KNa}+lt3rqvre5;upp(4w z@f7Wb;7Nx7k!g`NoSxlW{Cqz6aQgoI`ttPb+=4Wf%kO(egY4LhF!H@CG|E0T&o9-$ z#qy|=C;)|a-+laH@b3KQ^LP0M^{!DNdD8JaG)2B3FOm z4@Q&)P*eiGxp;p*`2OPj-P;x0P?=;Dm8_;fX2)nakHrUrD17{eYaW8uku#Ho)g{CG6f69AE^PNor;K(?yiS=*n1 z(9TsanfR^c_TTJYS&m2Pwptwe+AX3CHz{ifF|j&)Ew9aQLDy*(#`X+`aEN^@FyhYX z<%QAux$`6BpvhCm 
zmXn^>T7h~g*eGmjF~qQ!Qa}}Q#Wh?m6WH*Teb%YZB$SHWf?7w;x0Dwq3`qzT>j*tG zl;aQmJ`gNI^V(dK=M~2^Hxec5Ti3`b^p#j|W{#}QF4tc|d6-p32g<<95~{(`ND7)W zhfoi_0xv77h17dh^5*W!TGl&iT=?C)h zb>;+N5mH$|i_mf$=aCOoYn}g1Lb%ewpDBy(|C}7;^M712Kh(#FKT?MOphQ6bl6t3+pxlK%VqedhqQdtEvM~~g#iQ)h6sU8R z+{x+En|5On6G?_9MYk8TizQt#YOQ}v#tiW#v%aQf!TRr=oD|mo$89x|4QPn zlI-RjUFoS~9|Yu?2|?nHvRmaj`i5(;LnL7Agm9rjJb!Lca^P$TGlBeHXjVp_*g7zX z2_@H<6L%FfEr^L`F`qfihv<}_y&l@IZB29!d`}rLpju>%$vrUqodfE7hygKy1azEU z$LWBGBdLr<47G;Py+y?1j)C)%BXjD*`|b7^i%B$eTpF}*qak7hS&T-v)5e@fh_??8 zyT^O#ZWx^g?enmGeieGoCfY`fQ!DF@4J^&d|oG0`FM zR^RShb9VTx$N>y_m?2mEbalb|jh4MoOXxFZkO=La-v=-fv;lpe-kp-!wM#=}?2_|W z@#>}4`VV>di28r;=>w0%|*+okBn*#L;M4yQ8)+BMy#e^>4UW8vEtG2cjO8qL1)cgtasIgyosJtdT4F;i_|#2Y|L z?L$)`;;29|jZIEHyi{H;3X=Bp+X}&A30LvzONOV|M`Lu(UFfT!>;olXHvkw>HpHHX z$n}i7!Z+P}vHwE8<9enV5l{JRX8iU7c;5%dM@I*?Yfpel>Mbrut`FWvTwF!uRAnqj zpCinW_mr2O0IO?J5@7_Wue6)?yW46Alng+-eD5m zX8|f;r$kOR9)+Qgq<|0obT)2|Le|NBD?MaWzfcg0*;~x?BxO`57E!;FQ^*$Z=pDI( z%saij&=utjQZa}m&~>0QX&}jLmv{f(fm)HqeA}g-bwFx zxBqXWY=2X-=TU*#R$W6Em=2PfiCVm_oL=1Yz^~2bZ^hlpILT*sC?9jNg%i5RE%n#` z6r;QPF(2%*(EeY@|Jgk{*v0?aN?Bq3pZO6Nh^@&6^mN+hBGwyy07I^Kdg6r?6QO@4 z%*Fhqp7~1(s>vU1pN`WXA*1&*_0!xbl;kq(%}0zI)}fSw9Pbz#K|=8m`Ntg;?xapH zbjy^95oJ;Y$->pi;7mm>@&M7jKtyMHdkvZ}40%}-9z+2p92wH2=2mWZHhIql6lfIr zyucyr^%9x`VNAwl*i4ecZ z!RWI`UcFjWOGPMzX)G55zp}o~#}Yizy+DjWzZEgRjQOPvfrp2FyIo#GBCMmuCb3$Z zW%U@PqUvhvdi(Y2#mIY{@zjKy-XfLk>ma;y5(7TZeYoTuJKzuK~l{AZ%c zEiDEvI{!H+`2Y3}I=l0qt&~ObUu;r*_F~|U3#?RXt$$;JK`mo@X}QeXT>k%V=j0%t z|9f}-yQQ*c_VKX*FMUM7OP|u)CgKyA1=7LK)7?>H1UZT%Q71gy2Ch<*9(7cNOI=gzy0b~dSm~%-t zaH+UyY!(G#f`ChzPWI?Y)X%Qo>J)Ndh(?ql+kz@4nCq908peg)vwl4F)ZLX0lv@1% z4h>sUF>NKDv`fi=i{!uKPM-hwPL6l?KetlqL@h6s56_Zo(?!uRAE$gP0$_}Sj&0rt ze=0zCn0WNg`Sefx&DUA=p8+mvHMU${^RA33JRh14+HFvRU?x1G9* zx2w7lt4h4Bf=VgO`zmLSb{f}9>&5?iHW@QZ*S}!U#bn$*Qyaq?kzaIKwEj;zx%(g8 z-%iQd&Rr-0hYNW0pijr~D!{ZoQL!tmn03V}l=p#;Npx?$HSAE_p2cS3<~bG&!{ldI#RF`3h`S7j`hf2ge2%bt3*=M~ 
z;GYnU5d$|GRxB;)6X=L*sR;q824T{o1OX2P)M)^4BZVo1jIgf8pc>@dN$4Uqq{cqn7jU`nlrA;02b+g zh4@e1lkV>R_jXFi=oEX1^?{y-4VMyu?&B0K%|<4~Ir#F>$VHAT9fK9VQcLGGe#NmS{YW+N?A|?00GAda|2*c<@>%Cl)*8&nT1H_i~=qs zV2D67Kmld5raBvJ4xu+jtf|-Hznor2%sK zI_VY4(n8T!H`fP8Kj234)5^Q4h8nsq;`}|8ljRCQ@81jy^d71ZLb`(*lK|ciB+cYx zIFgIA@dplXF_CXSM~Ka|BgVIf-EPCkZ~fe{vK~U~ct(Af9Az16$i=0UKh0^P2HHJ7 zeslbc4U=>Ef2sivlEe=p+;INy&QUS`XLslSx25vsOM5>_$zo#RDo2A%7+{aOyzNrr zqEPU5HUTN*9FIvWq+Uzz6>YHJHr_@a8exJ!(@xf6{40w2Tau_{5yNDB?M{#v`Iw9^ z#)Kx1&+m~NDVeRPCE6gbDu7u5bweg)u}w92Sq06sXsLUZq^U-_G~bBaSM7LfP`H!x z`;Z~dvk9+FGBD-Hk{wpLyJb=#AqLfkbz$eq(^6nRli9@>IB(JHHTY% zLblON?mRULao=(|sA>^Drj4dudGhdruV3YKn=@sT6ZR#CdC>s3z#YQliS}GbYowjf zixLsnN)5Zci9KiTWu{mSLg!>)+f~|{UGqfgs!9cM+PKA(q;tyI_U5)``nfh2&NWhl z5tn;4ElYq+F!WqQi+FnVnyWc07BYNm31Z@*``6$bEKp#t!a9T6(x%mXqN*B8ORfgl zqBDnOC3WIQrA%K}(JL-np|}ba^PWOqotp8`#{x88H|Kz`=_DDIV#EqS$<8vZy|0u! z6f~qh9nb!ip^o)*{=}F!8y4~7Ri;B>t)x@JN>L|krSz|uEAwVcjrp>~jHxka%4W?9 z^QQL9Sul4hW>4wk+A|0wn`O7tnKP%>lmF}Le-q?O9ex${ug3kaZeIWEo$TU2ZKc=@ zNt@LMMjjq8V_4H(0p)?>%^^UkK`!|$WSEE%X#SnIn=1i8vCoJ6(M<7)y%Oxd8&acf zoZp9#c*;<(QohoWc8pL5T8)B+8XMoW+gdAqtMvta4)#F;N)!XjR0;kBfl7%I%2#=nt39! 
z(a*2|ci8vA5P^tGJUM_0d4p{wX;ZJjlG&6u{w8%QE7T&nJ#D^G=-XQ0qEWT*OemqD zWR8AQTr*FXMlf9^t-teT4w`OvnR!mH@&9d< zvMiQ`43_hqe~T-8Wxrn-hM8Lp@#$176HO4qLLCg)0FA41s>7Q?QpE$p6YFMlCU7LGmSLYBmuYh&^|az!>59MHE-0wg9V z(y8NI21As|Sy!y2ucI`(hrQ!(n|bE053RInlDUucHdn`6ebAIxn9-ag0y+A&Sq{iq2H*B-U$D5nW!R5!Ro6;t}J?wVNaD0FMUSjtC^#2*$ zT%DesUzdJeoJ{&i(@9S=b5kpZ|6f0yU;Q%p$LaO?;PUk5$1>=WeWwf5I`Rh(5B7{K zzA;zjK)alssVaH;`FM&5;{1})AEJqJzz_wrC(t7Q6s`SK1 zjf?tKQDW2KQ7byXR6@-f@E#|z+1E94YbN!q?Ci72<@r?&$1Y{)UnON|CP}E37W6Aj z16o)9o4Sve|Nppf{@Xe1?aqIR}a$iWLb;ima@V&pH}8z@@U@^y4e z>D2<$T&v{Mv{K2oSiBCATx$68r53qO{l`^=Yp(ybB0w%!|Gkr9{P%-h{{QWiC1XFv z*AVNj*wWC9^Sp}ViB=lAE_R*nSGhrFtd373-4$Zn#XU*`+9fgUQnOr1eUdrSD-HN2 ztJ&9$Y)9&R#JFtGS$!=je+)X0N>YB`i|@wbzzADhxsv?1x=sa7FE1Q$LzOHhvEg?x z3^5r48VOa;%pNd=?kytTYjCH$NC1pyoT7$j;5o|%Ce^pPR@zN+05&-xHZs~8Gbg#N zW_hHRhY~LTV7cL5lKf^vcd4%%dFEg0qqp-G;r}K>StS1-9UK+zfA9AH?UX%m35DE2I1m)* zvDR_+hY|Kw{HPGRw{VQO)7aA?4h(fiXmWZahj#6S6zv{OKGH;O_q!am}SeTUzLjeTbb zZyWoL2;|?C;c;XC|7+}lpCQ9E;^5-#Id3>7Rbj)y9)fLMozdSL&Xl{rro&LX_5{q<=#wobcb0H2E2RZTpB}#HLa-S6)Ku1i; z1V^DNoQk8Fga)E~g_Xe*%xI*1keS@~A@Sm5n!v+cFg%O|@<4!HM&lStPNVU^!T$aS zDo}raAKXlkL<+k$8nHlX<8e2^?j#PxlDbrnD6_({IN&FaRNsEh27LML<-Fi5uYgD0 zNZA=xTY>@)>1+`wZrH{}_F54^B{`8O%xGlepDc zVQyr3R8em|NM&qo0PMYMd*e3JC^%nNzXC_jJl*ZnqAtF5bT((#<8~6y^u@33WHxV} ztQsOA2{8$<0Z_Km@&5LGa3KlaBoe@5CTO?R06o5iip-{yjaVLniFc+A%&nA!w zX9ffRZyPL~PN#Exc&PsGbUOL}y9Xz|zjY5ghbNtb!{grJ-{h0SlfQw^CXuKr5f_mC zt+RGp`ObYM1rkC96qpj;Zvfyz0ew1lTquN(98B9V610U07tr_nAf7I_82Y%y5t|~` za+&vC*E#4p-A<l-*)7b`)F(Q)h~qh@dZfie1#6c85s7ZYeN^ zd;~c%=w}vIMg7ka;)1u_FzSPoZl}BlNwok4l+F5}chEh2k4x)DxaHCyqy!ORh|_=} z{uwcjDd~f0w*kX2`Pp^eIL8eSahKsxs7I$@2sw5&^#MmB3W1nF0n8NO;0}uk5EG=q zyTW(C6&fK11O*czLf&t;WAe3qC=eIz1^^?} z>qlH52116ZVkYKABis5&GUGt4vA6dY zg$%h+AP+ze=tx#k=thJ`VMv*Lg9+$5-@XO}3Oq%?yZ~nZ@W5~e5P?HqQtOjPxTAg{ z?>kAXPYul~DMOy#X3g8UHvr%Oj*)y*-kAV1CckbO81et=1Jkc#r`G@=^rJB*7d63Y zBE>-Uw<*ky(=#=?jw02EZ0{9ICmcY}i%ImRQQpJG5OS1xE(z0UxCEEdqXyPyXj?A9 
znZa!U;M4ItG(~=2ZVYnsMfa{BdFVqdI4BfbdFi1AZlfWJe>c#WAA0bOlo1<7 zVqz#M7!+tc>jOwAnFTcB3gOVFvp|Y+s+O5tgTgFaKo^aT`ZuM@R0-@IwKfD|1o{Ib z<8sq!Mgt@!C^EkRa3d~gFp!f6yL=EazYm&HhU>T6anA!vBpR5ETPXk|pxQ6>+;RBS z?SI?(w$n6CY#ydy(8XjN_W_fE*=Mr&n`(~D5fr5E@$sAE_U{ml5z`RI42~dyiy)R@ zZ@!$5`kUIss12@R5uq@7TuKDG7ne{>`k-m`RR2|D-RX*^jh1A|n6xNC_7d!MoSnq| z97|UI7bpmo%GMrCkRKw(9TBdr3M`QSJ4Z(cIr;zi;IOxo|F==Td};54bHXErKw?(` zm`l}P3HBTY2%srqv%3jm2<*2X9vWZ1wB&w@2?9;6o+sZnLF?h60f4OHTxpYtlL%hm z&v+j;9~xezzmne|;;FwgZRkn^^XY&YVI%ztkAU;DR$jHR5*tNIg_Pv8kYOT5p!s*+ zZmt9X`5Yxbn#Eq&e?Rq5mzJva54`}B7Nb5|-x{#U{(Ia#&aeNIgTvkW-$rS*S{VzW zlzJU0nNo)TPJ1ZBM(6TTHw@IMd?J74=agKmvMDC~3%tL*yH2u^J21>4k3?0WtF-{&BodXfUu=O#=9&(}eVb%{p84v7#6d|6T+U!A@#9qV;dsGWDP_FcJ*0GIy%~VU-r5L&+V`OD*iu& z9JTavX$3G`YZF)`|MiaZ{QuJlQw(g95ms zkhc;T(~Y&^lnYZUfl>psE|4;zrw*V!aQlW=kD(V}E~k;v&F$$^Y#@jP6qt_)Q%r2djnQ2I2GT|`J@(7q3&M_x`>kp zQ9ubtLRU@C0Sdp{BTS%=|IjjaD?TDX`WHi?k6oAm;sOdZihT26A+C|2+=V_S<06iV zM^GZ5A5t%_snJ*jH#Jl{E@g;v$E88B1taU5+Bv{uB=lG?1O+O#p=9(~5oky~FFE}g zs?(o3C|7dEk_nY=oR;>Md2U80^Ose35i;t4E#InI9RZAI7x6-h;|YVN$=pcj6 z{iBWUk*j~n9f1o(#FY9`fLtHqfXCc}CG#oO#*!b zBUdQ5J>A~5W)`QKSyXe9|N5&vyPzego%Mfb{oh&tmnv2K-*|=++zR=6*8j!)|D>1a z|0jo?gB}0hMk#lmQ0zN(dz9SP-Xyf+rlOFuFZ>$xm@b*DPRA@ zV~@9zuQ+;#NpxTRJ_Mrk*S=B?4lCSi<*X zi)#hd;-WAAWGPqa@hBJipgL>Gk>G z!|D5T>ooxCexSlMc)_JX!K^zyEl3c0RcHaD8+7?p+mn zrtkLMG28c4dA~QopGu|5;qTHQfN}x++H4OoY4b_*b+hFvg()tOjg@BAjyP-k)P;T< zhGAP@q~&cE5r1@as1pRXFxta&JjFL`pNE{}$~W_6{U z#AjUqEGM_APzSTPs!_D7v728p-N_m@?Mjwa710tos-J0pt_RC9)LBTIr4TSs8kPO$ z8R_Rkg&DI1{c3SO2CSuuGkpmpMK+a&LATS*J~lF5`n-TIlR7UGS`Fxf4`_ z&g%VIRe$k5;zH@`A1Jw^RJ_RR|E6B7)4!Ob|K~xz&`%*5+eSv^2%mD=0N(e(@zK%2 zQQn@hJ~)P;_mTKB*)ipMsbtNRB$@5D1zseZ*^BszHd(w_QRm#1=TqkM|MhLZf2*PY z_wxFG_xSi=cmHoIWhMTT~A_eOj}>U)TpCEn4_$gk<42#sa+F{@zh%zPJr(b zTm5qyA_fIzElE8UI-&Nr-s-KI1HKj}lKOOY{Nu&$ui6jGJ=)uqPu3DN)FleG-AlmXEYZl}J>IqMswo zkoT6+@C=48z>!bI>CT#GIavzCWp|oW>ojKrrz{Em2dc3_9QJ5;@Urs3%YXQ}%c{pN zn>k0x9h-b4-$U$2>{0KrRPF!SiLzb@fCc-1=O7>dsnzIKT$^VC)lfwC5cNhO@D 
z(bSh3&iQ@Fkh-g$9C3ZQMKcS6i80#3V?r5v4Pq;4AH1cAf1u+09&_p#zpn?hec)Ablq z#KB~o^h_HD-)WkGUk3v@EQ8xI_svm60cd+^fvpRbyA@lGh8$CRPahi)tsYSNhjNWNKy zIAzqB(?WO1@oAeHkwt|ut(nXX`;W(Rx)n!RJ7>2*Cve@jUbwOThMN$G( zk*j~Jxx(@*Nv)HK5TO+lw`9dU``C=zC=2#~bD*-`A@Cykzgx)vb8@n?|81v~O8B`$ zGn*Gxd-5#Hhn~9UyYja=>wk*T-PZ1Z9~R=j9`|jcLHytZ7ksbCx-|5`2Pn7ot^*pR>~f@(lfwh{JV%H zI^JlUChv5ZH4o@qH!2@s2;EyaMm&=W5DYPaY-VNe0#ghD1QX;3`fI1r*o#|HD*#++ zUmF$B4m28jdqAZj1#eM^h=+(;V6Dp3lNzjq`cr%!SO)io0;JO%WTrew4iPzUDPzbL z{_HiF(Fg<)7wPNVXGxZ_qyi>mLm$sV9K7TRfec>E#%n}b$ycu&a5KRiU=D`Jg%L+U zqF}`u1Oy>Zwm(DwP9gT;(3kB1$~>KmF=SG=_Ge%iVc%23lZCPpk!RA)gE>WL{H}G|pw$v`jeKP^!ct=!X;7ZGMtd@YM4pPnVNeC5UP?be<^X z;>HzW1O-x7Q7PsENZ>fmgO?l}d-28y0>mdkOc;&E695T7kgLTdCxjQ{C|A_p-X5r0 zFe%n%rcnl{kjwfqF*9anXrrMfpBa01mgfFclgPNn$r+)kXDtBLTL)ZY;v$nGOU~g) zN=Yh3oElKgYA^vpwxf`G4X~bg9#ts8|Mo<3gfWj9_py!tQHeQjn=7G-fcI#7xXcYbHdho^g7r zr>MxTQ{dwO3yqEJy`akd2am98RW-9?1%jxwj|xBNd4LiG=qSeovjJ@-2G)4WKD(3% zOd^zUj79xdL!NX!p@st*jjkhE6M(U4Cmwmp3ofab{8Vew&IFWX`_&d-JtB9I2t_hA zm`(zn$lj{`3HsOSor&Krs79`YCB^zr3QtX)xH(L6Gb!PA=O%Am+$FAG>{;_l&GC3~ z=hNI-WBCb0%#V{E84_5jaQlr!H_0wxLT|QMb`3Ve> z91cT)s?kcej*<~{;sN+UC#IWEeUrUzL!^&$;nk#jTzy+aFKcg;6u;5Q_vTaFWa!fr zS7^nginAYDxd1=q#w05`R*g^$sOB)lbz-u_IA7}Fum`*Z#T5svbb#hzGH;*^TIndo z4>Cgp{;5(VHX8c>oHQsuL0TL%T-d|hSe!f%V3*Nk4D$j8R@rwck)x%>8rk(U2cu~@ zCm^Y98gnHSeU6~7a`P(+g^YTUI^=Y~Ph6*V{?))RlkB&bao+a3yE`Y7{81(Gr!H@I zonE&+*4d+5%Bc-`?N`8~ZlrD`C^Q5G9ukNJn4lOZ8%?#-sFcyjV5dl*qNrZ*fDHn`rx0qMtBu$6#1){V}fHT6j|?@|k4-dJQmf{m4^F z2Z=rWzzF-uXho7UC>Ci90rw{5LVS_{E(Cx+rv?z>{l-4n-~Ux-Es7C|w+frv zYwi=I<{zcErW`Nf)~`XRl|Y6@+TNp)T7ky~aHd(Te1o8|CuFS&js5+2o>)5LLQBH_ zzSc9X*UD-DnPo>PGa;mVRP2jptB4mxQ$Y~{tV;eD)5!Pt^-Q-X&_#B|iHTlMYQAd=CNzBAp7ZuPeVrsBuCK!vG)Oah zOB967s?RH`a8G7iNQTU4>&uK+{K=Aol~ws4d!)t;l2Kmet}{wwyLU?r(FSc0$>12B zYnj2hu{F^8gHx~$M4<4u;Y4i@yqvR}c z8rPmOc6655cg6_MnfR=BJ^R{;x*wc@C@14QNNTd8Y=BFb;CY|81n0ld(S3d}j!Ln| zdfCU>nt3>LVg?yc^x6&Os?KjI8&=3vxP4JE@{Uv>rD4(UU$;BI$YBg3aFU7gzS3xX{Ih@%==hX$bL16 
zhM6kbxbaqNl~*PPf*qCFqwXzY4yH{FAY^oeeJfa#?BO^AGYNvg&=hMnX1$Fc>M8-^ z6RrN}ju@!+Bh}{p?0v@Ly>L6dz^ru9Arju7-<%HKez+c-onGGjbaig|w}O1ITyx(= zy&7kD0Vrb~=^CBSUDQ;Vg=ikblJ8UG%eY5*Yo#2fq`5rXvfI;9<}w}qu%zp6GE^y+ z>jd5~aqkqw>ew5ZTAA=0>d^obEH;mi0$UF$T{VMQd4NBIz$NwGCh1WBq0*riCw6t+ zw6A3xVTGo0k)+F{2>J8vhwF=VqO90LhZ?0Ub+av9`q3DZjkOo1;N}E*srn)_vt(}MFqu> ztCkZj$apy|gID^Eg;{Kx+W>A)J$~z!?E2?02z@L16S`Lq9y$Nx@j?Fn=gHwN|L=Cn z`u4xPp>C0B%LHw(^h$r&%BqvrJJO<<^-uo#S~Xt>X%NQp+P{(2fp{4)WNZ=3{7@-uX20oH3CNBQ%ggWfLx!&XWz4zCLZ^y%2q%Ua#nk35WORx>#9QMjv%@GQ7vQ)v?_yplOrz)&U|OM>EZ>s$)P@otxMJE~ z$OucmVLH@qOEZ-+f@Z@m_8Kq@tr#uNLC@(lJfx%Gs)xx%J4gTx5ix1V8nZwZ#Ml zC}wF~PIb49gRy=v@adQiWX%bJo)eOB;}*^CDD#vuq@!K6j5=gTMvyPbh(Rt`9D z8g=5}T=Qy9kRRxw=Z!=yQo+SmggRnK1o2rjQQda~g=lcvHJZjwYAPy+xml(T0|9hh#FZPf0w(vD#vK|C7^PxBlc7~AUt_Pa2d?#vEv{h& zIKZw&pR+6DO%53XD*mMx`~D>%q2*|Qpu%87i9Z8kLV4opr!GGXXX>LLB}LAR(4a?q zf*5iddyR$;=x>Qv%_i7O90f23cd}|K_$0zeqakOX3LPCHg1tWY@?d_b*=c^qvs05* zk9L5FI`lF#Tp_P~(z%|Y=3;_G6$*iZP^coDK_;u{fZDQwv3FII|EWkPjruhP( zDPl7*Q8)2ri-|*^6lfUr)v)Sj;{hmub|KNN$L@j}gTz1s%mlzgAEl%;;jx5h3;LzXy;AY#wOgZ;*=T`g3y<#rxpb7cYJ@ zrlR!GzIrwZjn4%P@gTMGN+pE5(8pxlp7w@Fz@GM}P(~=-tZfC3T_6L@7-h&a;&p7K zR>yUNF!KFDXoBrGPyIVMn2e}?Dw)+_gpu#1-}E}2<5s8J>hx~9NBz!WzjO58os)j2WBRY`vuq$| zb00LjDvBEQ&>#uLYUaOL?SmL;eRWrYNJ6;jntCu(r7(SJ;=7p#3{!^1Y~cEk>u9{G zHQvWACh>3wZb&$V8hNn=V!7ZAZhYg>OF&PMYi--zH zjmHF&@jwN^mCQ%$d@8H0GA8vp9hDKn+$bqbulTvH{jH7$9kca#n>eFEKqY%H3|;YE z7*I0SN{v+5w6NziQg56+aILnLS+dQT03sJ^vyQ2jxKwL*VfA5&exOCl#F&gZj}UJk zb`OvD)ZZ=*B+G3betU3qc+ffMX?sK#G~H%!bgU?n#qY#)JLT64vuB zbL9Ut80d$OH|N)mxEHInvFQBw@bEAn|G(4Q-T&Q6`L~ugrCg%KhyRg^BF5qaC`oTZ zq7nmzy1tl7KZt=}zkC6XHXolRwkz=P@S75chA54zq(#W|a@()@WK{a`!^3at)^98) zG6cOD;8cZNdMO3TzLq4vvzj)!tbTh948ZRS(DiGBp#80d^Q*r!2dTCG2NUFnh;c`R ztC#}jumA4B$w?vq-^szw|6?oV%a`^(nBt(XZiq>dMG9Kq1(Gc81ogpw`{AL{kkuOJ z_n}mWj7ZW!YK0nnHA&&hM_{+c1&u^fR0?H!Z7i z02T5E+xgUu?KLTdE!V}oF-N=d$x4+q>X6;%7Nl=fC;H#jAKB#7kYOT5p!s*+G+^ov zde!(_gq1Tk4&Vs01_)hC=?seqtg5EeO>u5Xv@@N}ZXkcgxk5nme>OpL&|G>-UnqrR 
zgiWZ`M0HFrf>U30aY#8*Y7R(NDY~=jyOeP3A%?s~y^WndOQ%z6EO9c8##QUqQxj;; zM_P?`Qv6#Mby}r^uazFHr!MNEs-!y@rw&zvtUO?iVWNBzI65ybkZ=X>S%N;t?1yUUTZkZ<`|9bSfw9@ zkck!@rM7{>D8~^U>Wy=r%B|8?wK<~Ba2CH!u8Ef>QZ1mb;?QKmvfxpAc{?#afS0;E zNIPSBg?ObGBMLvhF}BqWjX-RvtuX>=!+C0?Z^}j>eY?pK zC~I>&BY;QXrQCI2O2PRGjnVykDBOu!#rkd|p)C_}+Ma-~z{{bO3v(;{6?jRgs0%IW zb``W&uk2nZ%77+{`9Rtui>6k^W}(kT`OmI8Pd_JYKSSZ zW!16Fs->K*qHa+Zo>Q^dZ3c;V>W35fAqqhEc(vZ&p&?+DE>xk+f3c~zw41p)pxD2B zjjRmXu8{9#6K62g9zz&$=k)Tz2m##r5pvK}CzVce&Za%En)Y-uz{}@aQyU#B=Gs#9 zj4kxQMNF%-4!BXrg({v}T5kj>0k?1X|I%lDl%%J;3-?pghLf28Gs7`~{9kC6-pGqF zYFgC`BU6O=m}8;4Bb#aqpF6IUY^z6SqvK)@N#qiL?BU6u$J868;#bv!m9M+SF;l$m zamGv^5?SF<>zutW7D6+J`lSpkSEq#V#R^4omzt25=zhk7C636Gp+Qsa5KUzqPi;g} z1h73}mYK7%O($-%ls1_fC9((lTv63R$w(HNe7~ueNmfu8LF|L(FEj!k_EdaCgLuX- z0hf5~ylk~N5+DctJI8TeIpE(|m^hm{#Pn-z#E4PM5no3Dra~oqa=(*sLj_-Bl%b2n$Albpii;&X?FzJioHdR|V9xMlh10KOc+h?5V;2j5 z1~Bokwzm=;n=1<|31qn3n3am^_A0Dqk!BIs=9B5tt-Re`vTjWv1mk^P9tEXnS!_2~ z0+2DQHJg=IGn4hU*7=VMbq1FeeL)xA|2@pbf9`cV2M4?JpKX+N8CO1kWSS*1b_py= zWnF1#OV2SJ@bJ)z=UpK+WcmJ{O+l%8R@#c6SX|Rs2!|Q~%-`CI1N{w=fLQ`y@;h=x zf@5;Q#c>>)YdrM;FDUiazZ%N`hO04vMfbms4o~vyzqdR8-A;MT^}DTQ8zX1(yJ`p4 zKND!}eej-g0cf%#tK23y2>n^sdl|H(FoHu6hk?IEGvf>m8PXBLRRXn9?mH#fCuT@D z0R8U(4R9jJfu?=-*#yn}iEGpJHI>FP!cAKz6SI8CwH(4N6>X$MVeN~oVv=K_&!%le zF##?#?!ua^I*Ep?obALw_CbnY-?)KgF4jGXV{uiCXM*%K4HDPy0?{N%$A};)*m)Z7 z!1(wJDWz%^+lBsXFk&=FXjzS?2^VIom@mucQlXxc(R{MKS5j&-Rn013<`lR^5YycR zG+iGfA_mwSgba=FePtWg0-FQA)ZSD@i4sdzJ=jGn$)YBTU(9RCvc!UB#=cw8PNh9~ zUMpr5G*h!`1@waXH4D|mUXrsg$DX7>@YHO+veerD)3c$rO6uH~t8|LRmH8Mp>g4Gh~LQSz1WQP?bhd73bz(Pv!=dxw+UOU(~ z0Prw(=@hY#vV#h(frdDULV*Kyn;i=jm^(GverTroPNBy zxm|3B#T^80`H&g&wJH-%JN?^QMX8@FM)Q{M& zA1I@dz@%Ou#&j?)e6V$#mCR(sFH8xrr+_^w?YeymP{0KgT+4KNM4o!W{cx7)*P>Bz z2cF5*maG_cGyds1@B!Tm@Wa`v%f%iRg6@Uv;&ztGS0k_N%4W;5@}J(@H*5dt9v|fG zKZnP=^WUwMXDI*0Q98CS0&0Jm3ln|Q5(gIudZ(8cz=%33p|9DMa@&B}qv~t~`YZ^B zc#wiEY<4A^!US_c#|#Gbwgz?BRJAW$#V^bI{Cz~UrvDf{AJf6ez<*o{#H~~j;rrZs zYg>ixEgV8*=ouMk57V~aPfT0vipC4T@@A;u~ygV 
zILEEur$H;E)Sn|0EZYA&y?p$SqmzSO{Ew}aXSn}YPOQ|i-{XPu_!)P=@OmpUe3q$B)Vc9$uo)}RdXb|QEM_pQ9^{m7~$QWNFwDr|W>{Tn2 z)UV$6?C!<1*+Ac_i2bD9vyBmJ^sU;cv7yfFHt7G@vWfkFkvU+|{(sWT>;H!*Cx^TJ ze;eg1+5ex*)<0+07ZZk{#|OSSwqCH!->4IPSs9ow>+TBqhRSz^it}-Z0+oAzr4~K) z^|EcgqtHVhP=P}0?|r8Fz*k>awOQHd`^`vB8x@%#L$GQiKDFM}*|@Kj+v=k3yEITi zsy09Z4N82SbO_-621pRV`)Aqtb;P%|_Wy33-M@za$4UPF*Wqz*=l`*l^4$0T?pL_` z53KEVffTUYrgq!Zf66xX$fEEd_w6=C;#J$e(Vqb>;1c5;x2?Zg-1g$pR*))(Pkdc2 zt-G>BSxNp2869CC$?dt0E65`I&+$noFaI4J@9aNYDbH2@v!>XSxPrWwNL*?p%KU=u z_j!462s!F&uAJKL7x@ZDOa-s;=NU^%JtkCZs=oS`4O|V%SWZlWmIY@B71vM52GG$b zGZ9%I9lcv-k5%UFm8IjbLW2RkU#(FoiLKMH7A{t6+XyBrHGdWh(O`mLxT;91uqlY; z>YmmD@w7rnhW8u8M~=+un5Y=On?gk?ZHsYvB=Kvl{r}#nbrpEQ{@*>x=l|~=9(Q*B zA6qHUegD6Ap5`j>+WY_5l}s=6kw63P!YLwS`zU{bIPeRN$Qb-^2J9QabG<1KD*4RV z9n2)SfeE&u94HfvRMNR>f6BRhFu}5Hhgub()iz9JLf9C+N=9xSIwjaGO{aNNXd}DC zvizS=5BF#Qy5ivZQDGL4-*WCWlkCr)unC+OO^bJ{%z*`X`Sw! zP9gRsqk6PH!nlR)bo8;U?#u4~pCia_qvZsEMf%@iC-47v&^g}4f80uWhWo#nVo##> z{g-aLOym2n1g%A$eM_1tr-*ozt!1cO+aP6duwyAj9F7+T4ox^ ziJ3De>`pT6vMm4Sp_1y6&v5-GQ|yV>{f*Uryz1jeRmVWaSYdqJ0_sJ6A=pEO zNuuj?t1T(!?xY29k=eQulQK)m^XE^y^0;Mb{$E$r&4<8zSjR5Du+3#G^~MJv2DVE3#u>nP zoiB?(LIH+;NA8Lwh`*grgGwC!F%yUbj*7v0u&5i4*r22{bq*w~jfGU67rgg_c{i_P z47mtTi?=IR?OY6Riyf?v7_CqOh%w4)iAUM4sPEPfp}6`8T_ZO2kyyS1Tz|N|9_&QL zzhqff{+l8u?pr!vU0o?~q5OAHi2u;(?)3kylxHaa*;DL^q`;pM6V81q1?u16OyN{* z17PA_rwuas9CP`Q7F_|<&>7+(B^=dvWv{RUJA|b}p&}X%bTnndgxqMLvlRGrz)V)RZ za&Z+Ax#MSRqcW@%^y@l>7V%P_UYBb=nWa<%vnok_P3*36V9PPw33&gBWi|OvIol8G zx&beg|9ZV*{D+kt5+biE6 z7Yi)V^BCg1O8!VFVAWgHqYx1flQ9So=Wx8tUY7LCfOu{lOH-e0LRwFIo50J@n zh$R=erqq)3-H0r3fw=Od-MwR22$WBAvsRD)odvnYPVWT2|KPHk{8!ucSCz&7e|h`g z!NI{U|Jzo|vz7mz&hkV)wg3~L`mmA7uc_f7AI`q^C2)l} zjhKsgCNg|YI$-h>f5sEy>yV57Q_5=cpV?A3j8(h-=!$9A>|A)cmui^iZP-)}-9|@T@>i^+VuW~~?XN)pn*LZTeuI)EcKK08* zkvf>H>^EW%S<7$4pt4cF5rfy7ej^#oHt08EW4H4g`RkW@@*fI)It!L?`OTF1@?ZDx z_%QGP(LFre#sA()$;)vt40(InH3Hk)WZ+BWv@c&OkI$~qg2i^3^Pb3L589x__@m|>=qVGE^rXZ#hsiHy zr!g{8-Dm}gDWx_-IUX7#_HF0e&fLuaC?_>c5a*YS4pHA~RO0dj680kiqy|1`q|JYK 
zXgFY$3cU{shJ)*9G{X0w*=_>x@X($h=!?l8Sv&tYRIVBb|$3;vGM%YKO zLC%K3-3gK;{_*DK^11*$zrp0cD1MM7BLJ9Sfua8v`EYiv4M}})WK{?e!_-S&9dwd+ zKAxi85IpG+ATlkohSRf~i=WR2A5Pz&UtgY{om-Hma`}DFXpkM75k|gug+|$@=J}-> zxL6)_5(S{p?z@ja4BnmpeEu%qpx!kqBu_eiho;EaOtx7H^!txj?@w>ap_Xg|J{^xW z zqz01d%#`|3fZj{tsah(W|7V5 z$8ys1S}Ra51sjD;EruBOQVOU-uDFKFWda+%vd=p8nS@f2TTtuB`Ihp+gdqu`VjZD} zhI0I&-v@$4XkMFZ^1R}h=0>7qed`)Ig}xH&&CHRt+2#66C=avB=s+2GSwb~98c9KO z<`C+kSKwtuwUBzRs$L8r5%`rgRbrTxqDNKx87-W8=(H42HH*&ztIg(X;TO`Z$&h+0 zV<$mv962=#c?^lb>tLy2)<$&+dzYZT$qQ+1>efSh%dN)MhEi%}TmmCGWd^aqKm9;H zzRsK=EJ7*^Xc1bD<2>?#YOV9XNeEXu_%mhE{hyPAeEyG4=b*Pc|Jz2Xp7L_gd5GLH zZ?Ez_R(=6PJecZg@GV5zv7Oel?$w^`TpB(Pi|5l1E`mZGJ zD#>on(UqP$_CY|NnGhuID7#gjqi?tdJ46D;P6!to#PjDCB?r!iFcZlCg=S^+iLC>J zm{4+!IdNA((}I|27W0|Ie27l@+3TSV+tx()!1t5^1FA*FnA`)y-#MVZhZqnONI=Kw zb({`}IFia(#87Jp-CIOF?ie^PIWng{yx(q*v6w_d$E890HX0&Eki}?pJ8jH)gn0Yl zuzS3x{&r~~yVyEBc+)#P`S$S5tHn4P#31p2WM)jEXmD$Vj0);f-?0a4B^6k;8qano z?UFjP&k`i#J23CNJeGaXRL2R;(veyY1IvA21r#g{Qu*2X8Sk5`O@Gjo-#6>*gyoaH zY;VjHj72^&&e(EJ#?pHT#l$+mUj6{eQh^NdvW5y}m|4YX!TNmeS#8?aREMyB=H!D~ z`#*(|n5?b=ER_E{dH=u8!O715Z!2Y8OIL2^S2Jgzst+ROmu>fYGUZ^It^VU_Bqll} z-s;RP7pRO)=ztOT6Y6*SD3=*Ne^ZNisf;OP<)4NkLyLM@aj9qg6 zDqg+RTK^#rA5s7B9UUGQ^#9{s{)erU-1^mV(H%B~t`qX`JeAt*C9Gb{Wi0i2B}=A~ zyIILs)JOloDrVOjD>>w0%|*+okBn*#L;M4yQ8)+BMy#e^>4UW8vEtG2cjO8qL1)cgtasIgyosJtdT4F;i_|#2Y|L z?L$)`;;29|jZIEHyi{H;3X=Bp+X}&A30LvzONOV|M`Lu(UFfT!>;olXHvkw>HpHHX z$n}i7!Z+P}vHwE8<9enV5l{JRX8iU7c;5%dM@I*?Yfpel>Mbrut`FWvTwF!uRAnqj zpCinW_mr2O0IO?J5@7_Wue6)?yW46Alng+-eD5m zX8|f;r$kOR9)+Qgq<|0obT)2|Le|NBD?MaWzfcg0*;~x?BxO`57E!;FQ^*$Z=pDI( z%saij&=utjQZa}m&~>0QX&}jLmv{f(fm)HqeA}g-bwdx zxBqXWY=2X-=TU*#R$W6Em=2PfiCVm_oL=1Yz^~2bZ^hlpILT*sC?9jNg%i5RE%n#` z6r;QPF(2%*(EeY@|Jgn2?c#rJrL3_2&-{oB#MWd3dOB@$5$laUfFajAJ@G<{iO|0i z=3;(Q&-^6?)#Q)1PsizxkkR{@`f2VIN^%+Y<|D=p>rhHTj(3cWAfb4O{NoM^cT%Sp zx@F45h%zaHWZ~*$aHb*`d4T9%Afhw9y#`GfhP>cm?Ketj= zwEwPuwEK+Az*{^z&h$DxL#{kQehfd?4EtVo{Xc^Jx7#_&umA4R(a!&CD`h?XZ*4a4 
zWUJR!V&`4_l2Xus5`Q)bk}HTS8oIOor>oouiM?x|0pHk_zecIa|D~$+SpL7=ljHpP zU+1v5#cd zj6Qqh)vHCdRD?p9#&R+6E9={QEWs1q3&aTYTM_fim|xluczEcy+vPPR!a7=P605aY zR*z9Cs;;&^4ody2?y?cJ<4;aabndh6Mq@4o;hIS_RoQw_tD1bd1ibK;PM$m^I_$Rz}9H z-)<&2ZjpM~X!omywm&TGmmMR>^RSMoHHZgO32-$ex2jKVv8`0jdFpNSt1ZjOe-?lCMcHQa)ho~eEoJ4CFuxhmh7ETvTa#R?XDpWLkeI=(UjGrDUk<{U135GgSnO z?3ShUQT0n>_08Zl_y*l$F1VzFftkVRCHkCHQC-Vm65r$m&8zsYv#YlpK!!koIhS+; zmx`OlW>Fv}2)LB#WRIRi{p{+kP9X<|Xha#ZEvRCGxqj)WVO-cf>&H`1-Cfy0sm1^A z(6A*H(^le1yOa#LNd7zStBO*tm?g$sr7kz;CHNDZ?ML)@Nr+o`K~ zyQ&+ps>ItWsFcFIuX5&Sr*XZsUi`0TlQFY&{R;+NOvddqwK1#_`9+sS>;I&ayZ_NW zKHjbW?UbDD+=T-AbR4e&OxqI`yTXcDSF8be3y1VVc^~+gMEBNPGq2=p7cqfH7s?{#nO^KfsVMAnh=m`5GE~35b#hyody6mQkcSA1=9=}^&*vY7<`VPulnzR zD>On32%6aQ@9yrL-&K1Cb=fyCq6~OQKF(}-Z>NNePO*nrALwb=a48Y!K2FinY-B>5gD($_T;zCVB2Fv(`dhxj4PrsD zlAP5ajf|n%iLFMNhYh_c8_pzrlQB#)hPV14<;vs_H2E&Q_-S&D=8F_A3fTZJMjxoS zWQgk!&4o0I5=j6cb0;R;bKo5x_CZf0aPV#CKK>Dg?(w0I^iR|LhdkAQ@=4U7(!g4A zl%R$teqX1rHK`60w$=_XW0WDUm0=aFlm#^a5OAC@HvsljzVB;6862aVS%`GbDBwZ@ zh6pqR6i_y6s`vJ@1Y7Iq&uiF3E=%e(o9Z< zBe^&mf8g*I6Z!UYgxE|wVtjkp?KX`3*3TU)>mjs`XViDeQI@fWTwF@|)0`%1pxxu+ zH^b2xv(FXf%<89=j5he&U?PM*+zoMAGC5c)VF-*qS?gV*}kIDFA zOlb1>{2sZHlG%z{q7Cw@0+RnSa}mbzC-nrfs=^Nq-T)sD9Yg*!RF z4;kV-oABBs15=JH*@3sT`%1|} zK||`(@$6q2>R3P4YY}TwW zZ)(q+1#_oj_LM%ZJ%d29S#~>}Idf_~`M<9IH$lGC;a5@rYTWr`+UeB%@m*5E5ZJ|AvM~@ z`F#k9rwsKf_u)L@}^TmEcbxSVUZKe4zfs z!F4nm;d{_*6>-)0ovBDF048mWBHy2-M+E?ep~Bp0{2OWLs=knI$X;+TL@re6mS}*$ zzY_6R_gAHt*N!4S4zSP{?3B01%i&DX;_ZiPSr?Nr;30BfIgN`EFvltyh)aopm>35U z{R|6mhkYLm5s0|NlLM%bH`rE^HuVZDnN4})Z&IhSLM@Wp)8-q6zO4l=8dVF=gc2G` z=IA%YHS=_71k+X0`a5stpy_s(nP*jh=hlBs{%`qcFMk@e$o|(m%E$lfo}3)-`2RLa zSr*Ge2Fv-*zr_{4vfnQZ!_2LQ_;f0ki6)3)p$-OYfW}oh)nV1+Vx#tTmF!IjCEC8T z5LtvuyfVz^7JU*C+7eC|b*pyQ7Ix3Hm%k7v3&$REAxmP-wK4e}xgwhj4(Qt^0TL4w z>C|y9gCWY~tSeU1*HN0?!`|_?%{+70hgMoO$=pZ!@+E&2q`c~%z)q5!ld0nY1v+24 zWYdUcG+8}k&GBX`o2%okK4?lT%xKOLfgF9?EQe@%SeLu`W!?)o1`iL-e5>|3WYGgD zS0$@cATrIX%6J6O=X}fges&!_KxDhJZmP}*{})dKae^Izry 
zx<#|*A}c@){OseqcM9y_zb`KTb@5^Fr!P9}Y%>7=Kbxv3Sy|F56UuYMW)Ni4frrNMs)ZlXkJ4f2vPZC6lqZS@^GvlB-<0dDCCSZn3voEsGd=6mfn!!mB$JOX^GYRB z0^N?W(n_C!u^r{&#S6R1*Jbr~hrG)Mk}baLd_cR9w)Kc*EMo$CiSfB?6b+``Be?aE@kLnC1q$PNvM?;^eapQ zT37y?x{sIt|G04e+d1s)&VRR3R#XKIyDpvYIZ3g|!3#R!rulVZb#zPV z)dJI8tK`$PQpvVhybh6EYWVV{7P(FR$5n)DuK%?nKrUGSy^~`6_uel5|8~lfu^;1W zh;>(NX=uiIUd8c5D-B&2yH5A3+@Ld7$ET6*3bF0t9;E^8k{EWWS+1l$$sFmG27Hs% z?CVChBXvGvTsG*ezLu0f2AxMGDZlTRVka?It+@o173E8EuW3liXIb zJW|U;373Dc+;A^RezT#w)YpwXb2Y4Zb1xaGRz;6mCM=|VR`jM(SX;@{{3@)3J7%Gq zg?rQ~gKM7V;npA40{@@8|2q@p2Uxk!ud~N2lK+nmj`H`vJG=dVJ7o`CLLqk$4g>{y ztaY6IVT64ZKPrUoEgU26H1>3e1H<1r;1lHgVC>VO_MpLJ{8}zPDDV`iQ&#IaBwk|= z5H!}26kdi5jqpA4z#SG7@IPKT;3M&8fD-kkL`mw?z{dnR4d?Ck;95|I8hhZ31_34D z=d)|zVa6NI7>l<0SEJu>hJUcO`Zs@lQS_?G%vQjiL~Yu#b3S-{E&*W8WFV z+s3{l0{J&(c-+|k{~CMXXUH&(IJkIw&Kr(NRoHN_hhSS*XY}`mGvzMzQ2T2*w3@sA zug*{3zCU*YZ@o4a>i^wtKK@H@r~hxK#3EgzaSE>VT!@3kL5@5?iIUum+-F4x&=FHI z!BMCRr{ZWPp@Ar0VP)_HGa4x$WG450NW3_iCh#y93=boLJP=@)(Kv>Z(`fu}u)qI- z3e?}<2R9QWk;1NxMl6upc-&2}JBb6aq%PGX%B-*~4)}>9)wf@>0bhQ5IWIWNE8tN# zQg%kwmY~2xI$H#a8@6$gy;ej}NlqjRGaA`=s26+}yL4A}WmmRd{{H{~0RR7bdpzU- GLIMD&@_VfS literal 0 HcmV?d00001 diff --git a/assets/rancher-istio-1.9/rancher-istio-1.9.300.tgz b/assets/rancher-istio-1.9/rancher-istio-1.9.300.tgz new file mode 100755 index 0000000000000000000000000000000000000000..d4b90c17e6aff1277be92c7582cbca01cdaee33e GIT binary patch literal 19318 zcmV*SKwZBdiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PMZ{d*e2;C^)~a{t8?<=S#P5i+cE#=x*+=$L%CO(+?lp$?Tkb zSp_0X5@M2I08qB3)BCsYhX+aUrMG4GjPOIZMS_Jw0Vq@z3RMh;?gUtia6xGMVuG1) zW;pQwvc=NrbULTU$Ljx1r&IjDdvw8Bz^1@j zF7v+YI!8UH+v!xoaDfRjBH*O}Dyw;fM?RjlLL5LJUC9|)Sx>&_5q6=E)U<8!85a;# z)}_3Ud=kcwm2V?Pr^Ex^8c$gtg&-a%yC>n;QE(kXohe}=#=h(+aiKl!4ng2%FngfP0ry@lay65XS#Ie6L|y09c|Kq%{4%T($$8V3g-upm=F zJdPsDgyvHrN7(bgLnK69=bMJ7cSi{E(3iWIL1KtGk;|3dL4&OKhS0cnHbw28m zLP*Ra=%WwRgKNr!Bz65as>2nZNF2Ny2&jN6!bQZVq6*LwBvRkziPI3^LjZ!!s$fog z@@Z+5#y%Zl-v~Xz2VF_i=`DAd)iaga|5Yf3`j`C 
z=CC}C=r#4;63$|^iT^eB#vrQtj1U`QSHFpPbXgdc1N1j3)*&7b)s~D#j0Tk=Uqk(< zA~uf2#86T&EMPqABOKCj7SNb0gd?BM0x6%UT4r($O0#eQT{Jf8-;63#C53j>+7Q4( z><^6K#!aUg4M0pFHop;aV=ibgkdueFd=N9gkD5|2>9^ZS&jT7tG)OpZWdMxSWxv#O z$Kg}A|83{nPSZ58eVBq_mxSY_4OpdfWmPro^B|Bhh{OhX(qJi;Mf z1hE2p^W}Whzge0XOM`1zL?}%jmxcl!#Wfa_K5AM$)qgFq?sP@dMoY3}61Hdv_7d!M zoG)IC%*%fQf=G$%?ZE{62pD%nw6Q3-K>q8ToE#P8ztf|WmebardH3=Z=0y~`EvsyS;e{0DzTHw zDCo~*%Q7FDWv#!G-$3Z8zjJNqN(1xhfSG+c`-+T^^Rrf7^uC@b#g+Sc`dP$CC`PFH zAH3aM3jp{W(jU!oFYLdcd#Ec*-TKE~K*AQIK5VWHF0%ifc2A4z|Lpj5zy5boTCG;j zLL>!5M+$M2k$)}gU4eA zV=SO0XiLqqK59zgQEt!LaHAQ6dS{*)Yu~3&D4C%Ze8)kbKQ~zHCyEe`$PbLhQ7Y@T zq+Bo}nRU;I0i4Dx6%o~!9*zVso;?tPkzp(#z#$m{7ul-`4n5zlD`guT5gvg%B%w#b zapk-0q0;$67qTaR#8lu)0O_0Tk?L{!D_z0r$rRX-_t7uSF^J|H)O^6goyb3Y!bS7f zIxXIm=EMURTOa1lu-S8orcbeF z1JIpLt9*UxG^JQws=C=GdZF@acyiP048fdD2S_V>DlOL>uuHR3Oske{_FVv z5OZkh<G9c~|L>v{P6rH!SL6O3M>+m(Gw0N<){+xtWiG8j z%8HSXUOC?rAGq^_Plu?f=NxYzHeWmETw{V9^d};rT;-^TUaBbdMV7ktZ`A*-2!M<1 z|3^nB#r5Ai?d{k9F3Of;MD73uzEpvyRU z7y=q{5V~q+GB5pZk4T7p@=q;ew-ReQ=wA#XpSU;$#03^Gihc87A+E8Y+{Hc#$7LK9 zkD#Hzenh>brbc5C+{~uyxRe3qj!T1b3r5yAwF`j9AoN%;zyiu`C>ecL1{zV%OAkth z>Y!v9lxsO-#e~W>PD^{sJvXD1`^&1k2pM%2RcuwQjsPdKEA%2tk_iLTbnc{r7*EKN z{n3U;aP=>_BX9wsm{LCu!1XZ+c)~qcI-fFaEd7C9m&V~zL@kYxvJ%>dnD*&yc_0%; zX6a*AlQDEz|VUgxyi*~@>sC?&D(uX?nXHwrEm19?3B z9bBQ@wsd>9npvD{W>Nd$%EmwH!wrliCS`H8zLA74&FSdBEGwy7|KJg^CG-Ch_W$Fe z{r|MH_y65Vd0zX!oMi>`|MIInzn~?kz4d=@{oh;vS1NV*B&~>VX8m8x|Id0w z{(pAdJ=ydBU6g9)3B|rM4?)Ro?M*^!Zs}zRy)9f*@7AoSYpvOpCBukEt#oFs%ak@=b=|j62_19@8c-)A%IX|KhJAQRkd238RS@E&PWel6cY_1F?&mxF}QH$&(>ID z$F}AAZ4Zhib1x%DCz5$iB+p}zsSC<`xmsb!Z~pSLHR-Z_f@m3JC38oy#dVAwiL24) z&#SEe`TRd|NB^A;H`oIfo&R)CdPV)ecd|eK-AS=m%uvdQ+3NUDI;>OfUEyz_rXy6i zb%;=N0)F7|Nqa)+eN*l*TqG^LQ9t5n%&-UL+N}p5Tt)RV=fp}-^rmh%0? 
z;#xzsq>uoQit}-P&E>nx^V`e8 zhx7NB)@y{+{V;`T&b2cSG=Xw;dw2Cw!MMFXzo-MWc(N4Xe*f|2;&O2H;r8zQ-Mc#U zOyBLhL$>d!aujc(Kb1;Vz~7}ofaL=CrP&^mu+1mUH_eu-6sDwLHddNdJLatIQy2Sf z97Sz?k&?Gr96EfG`$BK|@0D@9isr`GMpw)ADv;l3S9aBiX3axMe94n@b$LA0HR~%C zBtGi`ST(s#g=H{Hsv1SR9=rJ!vz@GA)2?D!)e$X~qx!k_=X$WLLY;-QSqT9HrBT~| zo|Aq))R-|V(61Ng6Tn)kxX||{GGsGp7389^?y?@(doaQqyHB{eyyM4aBLeHl_Pw{X#@P&N2ezz zM<+#l#^&G{VDDq-&t%7x>!p%4Gm_-C+ZOsd-ORpDo@kTB*K6vW`|@(ieEz?=?e{NB z=>NT<{@*>7)%W~=2W2h(zp=?Non6mjZp>O=!_=q)D43(HmXXX`IIX=T7~`q8F`R%r z0$cxc8Ue$CvX-QtiZZbDx8C}#nghNT2_^OE`uMw)ffw?1zBi_TEt-utnAB^SjP2Ie zF&7v6y`fCEPPZ4I=Ohxzh_WH^JP2=R+!cP#O}Tj3lM{rLC}hh%sX%1QM_QyxB`QVH zKVrhbd&_8afg?O5J`p4#u(mtZoEQ~F$3{+Vu5EEuA#Q`ysI5>acZ4&abZuPNzRB&=)=?!Cu0A;xb?Ui^FZ404D+jt6CeiWW_Cit(R%o#TD{&)t;y>u%%d0ND~- zCqMwI;#c~6`VSoVm1yT91z2a3&rRwVS?bn*Vo^!1Uv6RsUL^k?cg{-Zf4zPDr=66- z`d0xp9jG?@R$koA^aW#Td#`I%Fb8!hpv46iJegN(Xb$!@lMJKO4#nsr0iKHZ9DHSB zH1(y1bNLuCPKoo_qpDG3FzDfZgC`L1_!wBFd(4lFh*CvZNf2qF>ipZ1&Q+3g2i&QUX*# zo_}3(g%wwlS|<}BA}f4s#fo|Hu^D$!7VQ7#KxMN-;6?I(x0L_o>}+rU+fAvI@C%1# zHZQ98DGUq|KD7mzkPq{1m0HLSit|!j*p7*|BsHkd;jm9lmm35XMoB0c9lqU zywNyM-{~-G9@1%HR6e)}yZ3kuJeLX%4M~XE%*x(`rUWC5Cg2D9Yp2mTNLo=V0NrR` z8x_zFH5vy8NTs1fZy|!vgHSE7R&DA*4OT+^DLD_Uf_q01=rjkpDUZ@aM2=j_7`Vcp zy+Jb?qafxYdtLY}%~F+CAmP~1$FmSeuQ&k8;l*scMwC^2_1Zyq6T%VUXb3KjIUtFG z6>AWM0G@1r2nbIx@$t}??I6lLor^DGQn&VJXc!aUQ^S<+0@c2c3I&zP3;Rx^aWzt% zH>tf6qy)OSd8-lSS>IJ9+3y@+Lp?l05Gn;i;xhp>qP|ZbWOZPSvVOnO`0Y~tl@U=> z$fZH#gPd?G*&cLPPko*%+v)M->)#eG%)umvA zn(tFOK_Ac>fqo2)h(C| zYcta*LsZCReVv*ab2GHjP?OJ$y*tZtKdMP&T;t@7(A2XQfaAlfZOF&t(N2Qq?Vd6XJDhD(2 zafoP$VxFSvh8`vm+I`ba$qr+rd`b8y_H#`;jmG~*hld}ifd1hj zy30-aTmVxCxStH?R>9=w?lhM@FNpkAxE6{eS=K4F13Wl3`diMi5Y3lghdltC*S zrQ|_wh|qtkY=Vu3{=Xm%%1=OxgN6%xgd2;KhXUd#HPh_rt@3lgpE+lDJcs zx4TZS+aBxe!7b&~244F$@~9iD8wm;xL6HXnSU^HZaI(=;i3JnTzu zrkRKmm=&kg;xrn`mW+~3W1aOcSa%o9szTxdx+ zJk)xo^;%gCFth9kRVIXNk4k*eY!&gUY$_-t0N#^Z2n0pLS%f*)h78NFXJ~j~&)aGu zCM2pL?_MSUmsQFC#WeE6Lp{^&33QcTablv^lbY|^f{6`Zx97aQPT!|Fx51`+gNO9;RoT#_>x@(jSe(cIq6S`HO2O 
z898ebcNrru=R&foSq1q>lx|4oed30s_=OI`^MmmyN?gmUKF-%H!kH7?$GD6y-B6+G z{Fd_Ze@un@Z>+nvhQ-(yS>+Z{d54(`EMnf0?{oKFqvXm+ubX>bxKNUNo4}O(rB?y% zn@%mLZ#spSA@xANI_e(f-r242idK$~kLsH#zRWfAZB;YEjVc;B>K=FMdsBXEx08RD zw&PcIrh784JK0+uSLp@F+h zM*|WPv3-0L*yWJYRdbkCN8&RGTvP9Dnz!_?DsO3dV%OKr`dY;i)@Z5_^0|O}1&b*# zrcFq$q1TdGzqUwQz*T?`0SGpm$0XBA<*rYA^QoDAI_(v|F3_n$uCXMcO=u(`*HTEn z%uPLbRCe@qX>*&M7OvVWoQw6*pC~l9?!U>v`gHsbrr?)kx#UH~2tBtv{O?xs`k(EZ zx(&@wmqqvgPC7;X@8o!Y|7SPFcCPF<5;0a8iFC|JEvlKE(+NZAtK+(UyX~~KPjEZ= z!lj|MHya=cf722n@1tLSZCHFW3Ghe)DJiGde}7d`AqnQHKVFl>jCxaJOY#D_5bsk9-owkCksDMyJFB-WpGt z9Y!a;0L7ho7qcp18bzlB(+bUF`4+UNHk9PRjnMW&Mp*F;)1g*dnyFL~G#_@a*T7L^ z#prO3dQPX|fsTTz9;O$qaENFGp$Tl1#NJlttGO#>bSQu7sOB?syeOq$iflKxgp}q+ zoxsHBnP*z&W`#N8@vx;Ym#M?)#9vRz*5;6xgo%&S*IbJdPGdsTGk|`(Jtkrj4;5F` z97rn_#Fc%_jyoS5T~Ue#>Top_#cw2?1Iz`mwwOQw2}|R0s=I9xjP-+oPsem1Yfb=r zP85zC_b_{)%u~vcj&{^C>X3aiUcL-R40FL^SHuh&0VY|<;1+@a6W{1SEr}`rzctez ztBdCn;WU83-;C}QGe7HR(vOC%k)@kxYZ1{FRcOUa)JcLf&8s;9KhQ(Z8>w2RCTaJJ zSC0M-%AEb*a^cvljRpJv@zHT9|If+Ue*fP|v7MOusHsN2*%-yXU;U9zR>O!U4e zf$9BIH?FVLr!Jc~)cyMyL3k^>*EmpqkwUgi1d5~A%88iM0B8uRdpz~yNnEH1w-FY! zaezFkp1(mDUEgT*WWx{OyEFi&q4y^yaT=(Np{a;()@$Y(OA{w06_vrEa0DUo`smYV^Fz%}^E;WHnyh-X0|?dml$qfQdF7ML^$azR=@L~e z1O$=xxCMBFhH>&b@#ZzL)6fKS6afo}j)7>tKrjV16BBh4Teg@wgh_#hQC|(KZZ;W! 
z5@;8Py7k0eOksUh@=7IyyVxh; zxIOI+LExS~rdLKN-K=c|PF&yu!Wd=X8Sy$XQmd1-K@|J`ATq($o9F%mp7Cb>txi{y zSbddCLn45uhOR-;cH-0VI7tfR)A2xi_(=r_9z`>K?CYbkCJ9GWKb6dCFe2c4**CpT z=d{)7wmQAL?n%FM-0z(Hcjv6%>6rd2`z#yC+1y9Xu8N{XJs6~+IL-Vwt9_6lt*>rM z5NQZ6T~iNcrW9sRO?)r&fDy`wmONjMPn9zqq6n2bXbjt43Ts$@P|=Tlitl`*N;>8OkV z=0-tjdL_?w?QeB7;DoIw+r$M80xH>qVd#qQ;(&%@t<*?`O$&QYBXjaNK(}gJnWfu| z2_SK$HtU3HsY|VP7gZlt=m%P)OpVEe^B8#hxO;qhp#FAgAX#qf_}inC2&=9h?I$DIxE;s#BOh%+1fByWdy7igJi43qeL!7FRJFlc5+1HZfcUIFTm(j0p zkOBBz3A%o55VXIwaDM$S%|Vu0|APtm5istEXdP3){Po{GIy)=ne>*$g`+w}DeEQTr zL{k#<)eRvjvPePeyFik~oj@NQwm*MvG-S2LO}v$s4-ZWsAAN0EMw+Jg|Y!bi^=M50LmeLs(5m;5t zsGH&3l4$2To8LhGOman_=KpM>=AgOql)h35$B39vo2lxUT?D4S>f(@cP-+fJS1CML z^<5fr;sFD1QEwCf%*yGM8B3g8qe<0e>!}Ge=Oe8~I~o2hi#n~!!PiQU)>9XCQB~3% zOj1XxK~~PH#xPMm2^^glCz?}JuSG|XXHA_%Lw{#5QMFT(hvhMwCuFAQS{E|P!q%TE zShPvv6**a$Om#H6lU8(aORqJY<#S9XcC5;eBFsdKjxyUoX_S*2{mYGWk;<*gR<$`| znc*ydn_d&HOr%*tU&SHGh2sca(hW+!;|!fqjkmBrz8!cIrhdn(#EF zsZozB*LL|@aLFi=cUV`v-w=(d4WQY~-l$B|=&QjS^c9!yD&|@leqt0X_{@LBOIIbV zhONsz1(d$CnDw9k?|<;Gn)cgQUvZm7A2w0bIc(aG{xA8c`MPlRA>I5Iw5c0hx0U<_ zi}Yvd$5+O-x}_0FEVWCFK-O@P8rhqw5y;+da|Ei|+|3A(5qc$e-B(g@zJW13zQ@9y zs8y`5%!RaNA`aUV@HKihlyYHVg}+9xLMoPpmUg=i+UwVLFBD}^6B0g<^~j>BRk2y> zb6NhgtIm_p-lkm74$-Gfq)3@a6ZaFZ%p$;nduUI25gSKnrd)dSVjJz-Xvv$9PmX|s|xxf&I+2mDb} z)k4Wg7P)+{nU_gkP#9t2qvk(oj6CA0_;?2KoL>T|@Y;FRYH<*#0R211ab7#Lmitb4=W91x7wIhit6?% ztY?vC5iiXrv!z>m5xZjDn)~&}`@A~1M%l91ZmtC&XI5)AYprG`>*-SGKQ7i8Jl6CD zU3mZZxDfxh*XdQZ7PG~JO^ZVM7b{w(jk zj9O9{ArVT#fZxN+I74FwIzqHcpfxIdrzHE-4Cw}_{~e+MNd-C7w9h`9s98L5ZJNGj z(pXNoY3pQCmJhj>L%5})jg%;?eUVj6Y9jR6w2deUk&BJHuqLZcq97~hJ28-bkmA=@ zZeW>+v*E zsB9JURrOpd)pK&1Prmm`N^NGUSq;pB0=Ecawws`)>k|mYfOvz5!H7K8wqY%>Ip8bp zO?8y0uw*R&3%UNXPtp_(vBauycY zlN1P^o6XmjrS|{qY^bdg`>rSkEZ+YM@jrUKlaqb?kDZjnfK;E?yxvH7X}`9J9DHI2w3iFBtgK`{K#tZnh!_o=J0 z^#%}7HcP&bLn8X9Ih+aLE2E-84R8Xx9w`NfgniVh#qdYK5#r>Zuuaq^fTMv8bS>0= zinw5a1B6{crpi?>1eJQFh}da8V&-bXAU41T?&CniTXif`bOaUHPXnfGfC~F#pkNh9 z&mvxln}B-c-qMF~3)J!n7g7owcqzQ~31;E+nFtZtII%xgpxGB=1Xy+d 
zAJf%s01Nj2qfW21|985b{rI%Lr3m%UdjPrq}TF6|4Y(~^mT5dCSl=29Vd_4P* ziYN8V^yc;!y)^Uu-w+54x8%s zg`4DMb)UbFL#^pQMbF1{Ff#C;mI6sDbwv2S@ZQ>1v3rll^}A~Jx`uw3_%SQV1>N3q zR2iAi=ZI`e$_GzRg1vC@a?7gwe?VLn78O{l>vWvc*6-7x6;bNXkqH*<|D9ej{>RDL z@jm{?PRa}1|7)k%Ze@bh1oWRjzt2R3|NQwqx~9~xJ0<#k8VrW=I$w zJQki9Q6Fg#<^)GwT3`39#6rjzUnI26)k@;kE0ol)-}mh9CA8T>-|C3{tlhJX5gYWa z-l(yq&g?hnf7`N+{eO`;VA1}6)+_4&C!Nmee*fP^`9k*pm$LQG+4aSQ0rvR7H^$)~; z8-2eW$!V)16J~(xHsW*Z-7*{Zt#Vsk)P0u*DoE89NT5N9FOv=de%t~H0{r+QJHLte zw$c9IU1s-R!vEu}c>nA8^my<8v6J%B_y6t}xcd*R?R9|^u-~Tk+tk0yHuc1!@SyPR zE=A&X+rHDEAujL=?Ox9}tA`zm|1aP#jNUE?YiRJ2^)&lXgMo7kwTf;|=%=(z97{1#= zMJa8Iad{%~Yoq=D(OK#$@PhrndsfW<-#b3-?)^V@QeOK0|L8o=Rp5>H|FJ8XUgU#- z0eA5f!m)jnzd#)L2aUrq`r!iEH-P7QQy$gwnXx;VNpJ%bY(qIvrWmQDbM^j|bNOJ3 zWz`O~E<)>Vn978(HF{Ny+$MBNv0Ir=^QO>Nc8OK_e@H#}eZ{1}3-!N~V*IDx>0bZa zO?iR*Ur(`TviX0Y9yqFXvk{mFb+&(vthuc|nBdU!ZKv!-7YFpcdGpyalH-E*R^dvf z@tQ00OSGPRxnA>^Q?2jP2>E!t6wjWA6G(6p+Q7%-%^3A$h4>WJ{`v+d9g{*%RTY`S zsKOiD=6Q0MU_h*L(vYey%XY9*$*<|(cFvzR>E8Ji6JIi_C+j0lTG&lTpW5oa>i+*n zjM;s>ngFm!|2ys!{r`?SXZ!e%J1H-4|2I?YS=7G&(rs61eE*fAwaBw?MKk3TLXWbI z43!%jqzn#rETx#^@j@my2c3|Ht?i){(r_iT%ruY_vtUlxpJdu)RsPQ-xqOF|ie+wo z>9TIQU-4oCg`t}in%wytPP>ScdD+jy(k8>+4^5j z3|zGSPfm;ZpO25u_WqwcDKCEgzqF;l#;Si&3%*b`SpR|sE*49$fQ9S-q`3Z1I;Xw; z`rk!)+3R0Vv6sXG?i5CgxPZp{C9nIWth)ZigtAazvRLZR$|Czur(0bAN5`GL{bwiT z1+M>eiapc1zq8s;R(WvRT0&Jc3jWdApI$srmlmZNX2X{>pB;U@bK`jn{ zoeAKGLpfLv5zEFSHYn*#odXGLYax~A1@HY}-p%V60~g4&e7kbp&L!}6*unaU(F!F5 zj8V3fc$Dvo`fl?OitCTiEwHH%V)YJi{o(FQSo?_1=1^x_7IFFeWsDGmig;Tu^fQd()Hpt|2EaXF4bOlgDXTT#G za#-G#y~Yku#3liJH{X+e#R$p~7L&T2&bu0*?zdJ8}LH;uh%Qef5)A@|Hn?siJso-Q3S%%SeCvI*bHuf3zIOuWYY+e_SlEV9#T~d7b=`QoyRWs7Dcm z9tp=N0M7AvmAx$OnE~9^)`jOU19wRNRUV_ctfcr?Yj|K@B(q=S-bnd zh!9XrbF)#8|DFfA!%pu7zklPhp8U78>#r({{r`&gzoVn$eg3zdlou=iJ)i4uQS@8Y z>$m&TeSYn--uhqa+D}>*>3_XWY5kvc_v?Qr<;AZ5XT0`Pqx)S`>Mv0IB+`R4<@W-O z?vmHF~t_xSX<=>O3@KH10r-bpFSaX5;2 zd)hSu+uL;DE9A6KpK9c+QSAFleMF8H&Q=xSHWT6?@`;PNZtG|4$G{zH1}E=zqa1v2 
z1!cO~01J2W&cb6JCdsGoDlj@V(A5>2@k|3_)b~j^{weaXfCPSkA8%tehCb?MPksv1 zFq6r1p&0Lt4MNJNW{n#3tue~>isi>i7C+bfRY&5R43PPBR5b<>^#=O$VtqmXfdhZR zH*?xEyO5F0!jOu*U{=D)bVl!od;nU_vB5^_XlDX3mpTo18CF}MV6i3G@6vXcgi;#q(vZ|+MFKpAK@|JG z)Y}*ci{9$pb?p1M`i;%E7!JpeJ4o^iY!zB*Aj0?3;vrQvmL@o7i(|*} zJKuKZZU#s>sgV#kzh-m@eXCK4%MT#zM+iv`e9lOl{~lmCV3Z2I4+=(t+jul0kEq#h zBJ}xldji-OlYizxtG-nXM@r8+gPi1)M9dr)m>7(R4~ap}hQZwlND}{ecXxeTf?nKU zieHpJ$deHv5)whM{}y~ayVZuIK02`~M8F93(pN{F^qo(pupfeF9RdiaW!7+had-9e z<>15l`^($w^NULh(o`p9V>2S)dp9u3KQ+&<)xah4sFNxHrFP$a{9*9!^5@HU z#Rm1RQ6qWM@jIA;ubFJK66p6IZ{DBZRYR@V27EdmYsiBUWdRnofbXu}Uk<*%x_tL` z%{Ejf8AT&%q;aRBe7@Ki4rCu?dz zr@$ETpo)d3`w^!;SoraHs3!n~Q=LpBDS>QNzq7VK1EF20UNP}otL?w}y|NmQ%5Akg z^h>u0Ic`$b5HPVmd@ZldZ$Z~-md5r1M|enlA_#Ej{QAmh{oMHxbJS!s7U0kbLOvlQ zp$~AH8&Mw*luj^jM>f$CD{ zf0GEWb?|4(qWeE*N5%XfozC&`{`_wjrGCoGLFW;;Ro-6Jd#wBdM`SS7)#Qiz81YBS z$UkW)uzyXx^H@;sVjtL-bEc^9y~At_BDHu_oth$bu2MKTU3t@PEMg+*&}8WL5_Yko zE5=gmpM+xue8sGxT_?)IY(D|>evSXMP?F$c%bZF zd5(U=HP|5toH!v|X%H`;TT~o48^TO5{~OGz=#y9n1_`0$8gr7aqNW8g)hy;Shxrhl z>a*8l8@8>99-!|jLk3hU9Fy=78UD@@^*vxnOmK)gPOsy1P|QIpV=;r)5WDvfdfYK^ zUU4v|KD^&E{Vd1_OYQ#@$6~U+2Cz{6?-c$2I>(*8 z|KCo^rk1Y4&aY<9Kvf^a%&*$+^<*l*GF$z}^H@xDNWAs8`_`NtekXDO1CKCp#ZNa^ zyx(Zq3$=niV}?Vaz4QAB$AUJn@6(6#aCYm`2#j5F{wiL-EVce49zCJ{-#a-zEw2A= zXP^IJC#A4{bzF3Z4YBJ)Ji5%Jc6$k{*K!$4yL>fyEWa#4`9 zr{7KpmPojYS6?$SB|ePdlDpVfL)k}4!fqfmqHIV!55n6ScZF}d_x0h|@*USR)rfe? 
zUo+>o7vRS}Iz2f#vR!)u5@z1wa^(8xeayv89GnmMR&QLBO6hAA_7d|F| zwMjoWm#;5N?f;UwwpN>wik%VfmO3c3EiuFXjL2o*eDt zf9<5KvHmaomzzLIB1%G`f2GXD{G^`wOA4y#A8ntGvmX(o zk2Cet+$oggGVIL<#trLGNPt0SkAsOfPiHl!*~#QUuAv)yd#YMJ|c} z;ZZ=SGrhe*O&mqMstFH6Ktm3OG?}@T+nr6{bAbhnVxN~d>EUri zJ)9mMuhrC0KCGM@jE29{q3)3RV>Re{+GIyr+F?(rnk|&5I`-2H1N2MtP$9KC>7t0| zOlg(FnU0@c0c%mth{6jSGxaLkC;%**6sy3gT8n{I1fQvbmQ~9l8qFa_p%Eg0ed7p9 z{mNAXj}b7EER!W+N=^%*FucRC+A8RR-b2Vha!p>jwypE&y|V%6k@}IHunO=+5*Iu z2K~83jeU86Woh|us1NhjF#|93|2RD>#{cY{?fpM@Qr5KpZho};g3Q1>JUY(xIy*zI zJwbj7Ki3@lUUmIHf&I7JIVrCH?#b!i|7#~@GyQL4Ht=k#*H&WZUHp<#(13>iY!IYZ z5Z5$x=lxIDxe=0h*S-M0u`mA`Wl8=oRjsG;|LvZg7SI1W$47hqzmu|M|KIsHe-6+& zM|6ZfMiAagKK4442GkkEMYF@Vrzvq-yi zl2G5Kx%9?xmsLxm4Bi;$ZwZa|ts9|ckJjMTRno=RI5rX!mU0uP4=;TEdQmMEp%7=WTnzl$ z`Zga+@q|YKEW~~*W_}g(%NjzTKlj`1>KYPZ9W6GE)!MA8$0!wbS6iP3rGE8WLyJ$J zUM)uWb;agc^Rrs?GIO^oK=b^av+6^8xh>w+=Kz(n(`>V9p4J6wD%Q?*JB@6+{$<@H zER%4fk^o=_r!6+Ef^3^xuy+?aMrU21@BB^7CFA4PM#f#f-CS_oGWGJ&?$--#e_Yuw zJ4TS_5gk)&kPN00;Fgfw>OQr_wn{nYxwp|TwyYxmnP_q=i-C*Ie~wE2zrCZ*{`_Yr zWs&@sm=s^U7`W#GYn7$ezcIl;%h+C8F7vjR|G(QgJ1XY?-k<;Os2rGmd@Rr_A42rX zr}RFKP;=fvP!lz+^OL3&Wt+{{uTclJl$B4y{Axtw&_n(2kVjl$z2G2{mvfUwwkxtl zHubGa3z|uBQK#uyH8YcwY5kR;w?cxKlA*5me-seRRS_(^Nx|YKvxygx|H_2ZYH*Yz@3{XHgmvlqdikl{8Q6wgST*`E^M^B=D zar0KEkV8WlQ3kdJRZIxiFC8_E3%h6ic;k^vXVf2W-y z|L>ih?(cu@q%0G)yiz{ANUu$oMZ;p8@|_5P2?{#4c_00$06mb&Ul)}8Pa^`5K zalN#@{$DR9V`k<07Yw^39Jepj#;`%;7hM*u|Fcfv{zv!pbie+0Qwp|o7Yppuak2^s zZBJC}3M*z^xeDcdNTd(=&e0z+aqp3w3IhEnhA}X7r(q@1l0Jcsxt5v$Ni~Rs zEgAyyAfQeIp*tx|5w3!1MvQu~N;-`Gh_SEw@1PqP0YieO_WXy32j_RyoqmEE<^j|SEA=sV{ZBP#+>`+<(*H{FpSow={r&IVl!(zO@qqP_ zo`wyVh5{aw6fMn0F2p(d^tn-p9Is5oS%qJIEmpV#7L+T=S^d$-7^P}5GAFfKaoV`WRHSn%*!JeOX8O4?7cMli1S78YYFd>5+hFL0h8FSk`Zc%Y ztXRnKttCi84<6s3uZV!aUWLmHYD=3|i;1e1SXy#5$QGSBEGw;(JgQ{+x{6+L`3mJ# zsF?Q@`|8w;M?Mj#`KCDsgiR;Os1zes04jEt>C*d3#X~_O>eKP;Z#n8%Pv=jJd9z^= zKVD}#l-5c%C9D*6vQ|d_nz=G>wk$DUR+uqM%$cfLv&Otxdgd&cJ2kVX^6}C$2&J24 zx6_$3r#6%Sm(~9!;7c8T9rbUC`(NFn{?|L($A8*Mu^Ey!s|}1iJYdGKro95H1I3#| 
zfJ%d0`dP$CC`PFHAH3aM3jicOAM!^t!zb}du>WpIjka<57~#-UhI*Cqm5#JyggVe_ zlr+@D_^#d7TG?BzFX(fy4-!zS7+9uC^d}N54qbG5r2ZtqZ9E#0N7QVUan<-;s7NXZ zr)`X4-=Aei1qerx!rW>69W-=RU&uCOFE|>4i&eTM8UXrRD*o#Js`T>OQN$+!5&D9i z^7eQ&oGDto{ctPml5mW81nz66aWz8bSVaSIX(%uWjf04OMg)2wzK@20VlMII2rJ|b zwzZ_qyaFp`Q_=XF)~T(q6v^#r^OZv1)&dues)c7t2@NH8^qb+Dd%7}$*(z!M2X7Xj z>2_C{XLWxU*8h_H-}2F3{WNHi{jYaYjQ`i`bUSeol0b)2{0nm!GI0WxGHBlta@B-)V{8gzX_p4+jkZsi%>~chWXs0PbxxN!s()J zE#0++-81bKFT}~hi3cubX{@<634aGyh9VA9DfS!>qZ;3XlN5`1tOf0z3Hc ztLwjAeHi?BcXvIw{&;g&*~GWU-EI|*?=Rm=%-)~>KZCoQ^NY*d%CF0lNgruC*=c5B zY9;Xh>!-_`e+>S5etS8%KEM013c6(9*#fnW{L$yn_KYmQF<0eKyPBP;DnJwGTWUMy~ut&BHe#3R86U@A)aVQElie!-F1TM56 z$m-hf!pzPhuS+_dp1N0a+O)!4CY#kt`D?d_Y{|6p>-?d~&0HWh<~V1Xo^zC@T{{i1 z(+t2oQ*F&R)jDfMax>jR5{~sk&-!fOn3WRAE&^Ad8(Z4vsPq6 zWi!v3)6mLsRGN1+8+4VAh66<^)j4xmGt`w2bg#81pKhTQP2EQE7Pc%&(k10)@f23B zeW@!;>VF$$fLx&e9i5z%<3Anm^}n5zrCH?WmqfoPJ@HB7qJB}7*mii-n$EA3P;&`* zPm|c}%Nn^2lX}*6_QmA#{Gx_qpEC4cC1q$XNvM?;^b1S_x~%*+b)PQ(|7q#`w{zUv zpa1Tptf>kbc3nB&3zA})gO_x|ZS(8Y$X~iQP`0WS>*$uUs|BXHR>h}TrHXB_d>taa z)bQ!kQsg%EpH>lGa{X@<0dm3m@12$7zaQ=M|L>-(82d4~hPdpCtqjdL&#O3@XqBPs z64&W|of~w{>i9g;T`9I*(xWV(T^hqKGs{)f4|7L)l>y&mHT$}e?MPjW7?%$^tFI*$ zk3r{ANy_gB$=z5I7-5SmSCao$*QwC?^_7F}sFKAbHv9oc5edhL#zNIIvj+{adk>-a z20bV*5`>c(r>Nl>c+RSUN%gI+wRV#NfNf5Qt&FzD%t>#nSstm?p@geHSZ%mhB){3v zUFz#bUbq@ozPXo(EfW^fJ}dgBQCeH+)8ZVSse)^s7U3>GtR? 
zhW})3^>6ZEGH%O%lAnAUwlhF-H;N-PB0liOp~D}d#-TIB_l-kG1oCgn$hdL%|1}QK z&zKP!b9D9gk~bWas<7b@4{%#oXY}`mGvzMzp#3ErTFqVmH<#yc-(Na`w^GL!p04!tCqCh`auj0|G|9tw!dXc9xoX*B*fIz0S91@sRO(cJ_jQpB~*&-lm*v3Wn pS`k4dIgu#LXl&!5UhsYF(tX*Nec5^W{{sL3|Nl>}-r3%skaq90g^} z?d-v>dfvScxf*GB`f7m)`n;#|ZH`f1FDYjZH??Ee6hccVBwG_#**-eZWb)`HiAG?` zt))xvZMR{tvVTk3%%7wGinmDyhKPaFsAC~$AcfsPX-7GUTD_YDW;j{!`~@|RES|gZ zm5q&EfE3HrKKi`fR^R2H+g*KDdHIB{i9nJODpKRS>t_Z2u1r2UUHwq&v5t5abOTI{f1q;Wt+5+ESrEq<`Qd;IYRQz+;KS(@9yuzwKi6200i(A;Cvd ziAi+tIZz+jzH5WwT*HWwPsYPSVC3Tk z9qEzCT5MaTr}ZDzj*u2+!;KKl1h48BrD)ocCcD>|)43DG1O=gJ$J3G$OQV^K-B-ku zUwQ6-M=z|s|1q@GMIP*NHuZCwly#WUWscGs{~I?2R!h|a5}avrz}n=&mWz1ePomjz%zMVnRcU zi_Mbzh=y#9BMu z8ite#HER#9gB6DZcYH!zUno3dPx=RK`oq?B@hSK>N_+~!Ha0M+i$=qXj>EqdTcFl^ z`!egybs>d;dig=;MObuXr2FYZ$4Xuv#4 zi#>)P(Io?%3}nevaq{BeaewfmWJx${axeN)wSRXOS{G63u)*tbKzYMI@y+5eOPw4G zgNqhRJUtmf_(4VUlWl5a>XU-p6%}I3$JXM4%Wk3OnXaC+*f<(!ZGa{aJb(Ie4pfUu6-YY<>U zVGq(x+lcSCr^FYNs%^jqocQ{Ya+ECFpr|WA+0N3RH(`Fh-8#_EWX-CSCRADQg#$UrF6B00q=TjRk!8;kob+FryvyBs z=R>Y0NLaDj(M8^A7@V2ArL2)(^Az6~39DJJGX z9Z}6TKozo?HlD7)5>Ev73`k%g9BLrao-HNC-Bza55kQKq9q;r3HUwYt>@{qXiXhF% zF(UNuWL0Nbi4e^L6IHI*gOO?zm?fb81XHN%WYe2>KcOlk*$KcqV^bs!^$#TsuFcGt zmhamlM>om)P@7t>8O}t0!wDLvW>N%q4uFm}*%QzXw`HLD-Jmq+j3+%pZbeg1qQYEJ zv7imb-E_{!WG>QixR}2knDF@H8e*RnP7oWZ-rt2lPAV@YKx+AZF(|Qsk46XPSa(*N zwV*L(X>r$OZszO=*m%JtCY2+JrKeFv-am1L&=a0$HrUUR!(TxPJybwWvJ>q zVq%gX2UytDGQ%(}f?}2UJYVb9czOAErT|?ulnpm^Ud?IT2u#?Jo&q04`UTsqZr*K~ zI@z`sD(?rAqrw+M-=Zm~%$-|$6M~s%&d>T7=qvO<`)Cc6_aO8q&|%>x=*aCB)RlDC z{RH5hLO1~(YkB*L$}4+cf_C5f+JJTH7cYGV>JBoxXnWIded(aQ$H))RRl95Rf7@$i z79-Nvk~lDNYcnf&b$f9N)l09vbhk!FWlgtvE;f6CRuU3PU@n%mi9c22y)Ns$JJgd=TAfc*F}&*KlF=0B{k-xSd# zmwRI>N8(@w{eHP?$(1%OeC*>5!`TVM-85AA5++ZnET&EelX*1_qp|i=k6Xl;@+SsT zv5FM;*)q9fq-r?H&n_*4@p~V~mbQ!L?@$YD<8mjlCQ+BT{ODRmb9q`ip!-+ApI9J2 zkwZN!10U!-_aX8PaO(j8b|snAr57ZlG_Cy0bJH%{?i$<^`5|5IbctGU0`<{;$y98J z`vNEKCr&>k^NvmEdQVx=0zbNpWXYC@^=WicR|R#Hym){t9K-Q@#7Il1jqG=DVjNnn 
z%YQVqFXw8V#$|uAr*rVWB)YDe?63BcFfFR$`|2FjR@T(-w9~eI55qCjVFUq2OCtAA#+gP*{|nUUlDi=peC*RJ30XBoX)zr*bI)Q)L( zB>;3OmHvJ5L10tRf90D5DU7p}G95D+wP+P&2kfdjs(wxDY?U!RZSh2IzL8NnHu1Q< zP&|k1-tEyJdSK1O)Wy`MUTTo`E!19BqRnK6CB7t5pI+Q~l-sUq(aHF%Lf6~4WFbr$ zrK)pA{u2CkPjA7mGgSI>q_L7*mrnZ8^?L5nNGF63(Ygs80!FT=6(m6Y=~K>Xs+n)d zJWK80qV8O9D{nS#vB+}qtMheCYIG_$?XS7&skvAcxbFjaX+O}MvE1;Njv{bt8m3+b zbo)#GRJdoU=c6sDQSTG51eq(b9f^Lt*Lk0!c5xl1wTANwyQbw>Bj4fKV_0=z*caEK z{PqTmzpG5w%X9%MH7HyH2ug#P13z?yM-JAx*yyBRqL*M^8sZd{6k`=5pQ2|5So?R! z=JO_EY1iZ_Z7nD!?AsV0+&eRTa|9NsqUzR`d7qDulV3jou3NW;sq-Lj$9e(#tlSngb;dT!;F!}Yno}|-jM=LO=+Xa{w{R6jhy!XsyfOa9qB`=b zN|JcmZ+ecOnls>rQNf9CZoVHsC;Jg!`Su9Z`mtw52^20k6-IyAUZzJyeeNok>XmIp z&AFmSm`QBJdk_b=vELvFpO)$iTus1>$ze8{=c$u69+NYwmY}B;o{_K`@AsHeQ~+##Uy>HJFi%0T@A zh7rGnRQ=t#Ubl<={yW&Hu~UxTj;Sp6CGSAar`2x2wVg4*rYvnW084o(z^2LZcw|Sx zkfpTsxlepAj=jutZBL71Y^?D)auilqHuBqoBFT}orXE*CbvZ++HwnXBy!Z+i z^b!J6h$Nga1Gg zLz3*^XFwxXa%((ep&bJC22wSX{Nw@6n|scP1#?Z+-xy5_N)fE1*ELElZD+~~cQ!P# z#=$Yo>U12EEN2|^0iYwYIu}2b0R`x{{Hx*Y?9)@$8s` zkj?T_30~mp3i#|y3%s;;tOMN^3*3G>@=mq7f%F15_dvUQAcLkQ<^whl!9}Gu;Qa^a z*grL*UiUAJsJdXTi$pF+!hn&69_^ojJ8!t6duNmdcG-!>D8gqaP&l~c`jEXKs>Vlt1y>FKL|0-YL;Sr(xM(Kr! z7EBy$6VPI#hGJ8XexWqsX@s@SA1PACJO0BnnV8JwNOiOy3wZS{a{(8c6=&me(9$}! zvKaz?EL12rgvVF!br z>%^&Q>kOyn0`&;V^*E~bt&bmy1FmyP0zVZ^QsgTZ=4n%sqX@Ku(J*CoB+Vrdox;at ziJKqx*X6mSFv0RjS&cb}l~9qHMbZMz#qMDMSXDY)0wDQ2m5iaDUg}|T+ zh4sLKqoz>h)!Y`GzVz0RFeammKoPDpH4PX8|H->%v#cPExX89S4+P3L%cm=XS#*H@ z(2LvJL!h&BxmL812M5^=fmx$_1to_CPbP)Jw2Up4L}jkCSEXh7c&*w|l0RHb%vDeR zb8rsP{|hFE7*pDWGFnCk!Ch5IhbBF-c?W3S9_Je zsr#8O9frK8Q_PwgpBe^8t! 
zd=E%LXxV$YZrj7BvdlP+q)d3@fgTNqJ-fH$PtYq!72Bz-Y)tokGdnJFMbeFx6MKDa zvQaH$W?i!Lm)|Yv2<#oHRZ$lQWyMT6IzHU$SM~L%EFHIHQUS!p?0mzH`^)CbJh2-7OFP4s77wxaywr!xhJkTMzD0@D-tCFG&j5V z5KI$sh%TjQz?+X)DBRH-DR>Mo59(pNAx1c1t6lt0jM*IXk#sj`C{bL*gMn)17F zAZTl4KL}ky4*hlVZS8CUOw_amH>4;C7?lVcYZ__!D9*-9oldbO7f}(_Esg^@nm6*% z-pQF-8cf!Uk&E~xQDNtt&1W3QA%#XmLrTvs5@O+|uz*wBT2Mk2Ytnx_Q2Y(b{g zEZGkKvN}!m->L}byS4xJWv4RiljBS|wChF({a2 z|DA}fsHTq1V1k++S<~PcaFKw|P8b_W_smwr(dykch-$w8lYkt%(4mQ!&94Xf)6Yq6 zHGt=?eja}xLVN%MF~D>dBNca@)9rqR^X{}wo1?cZ5s!ryFO~*zIzc3yN)>(Ou;Le0 zKbgf}DvUcbB|rk*FRJ*=Sb9g35}z#CYwI~rJMr?8->;X&pPRIu=tqxz6t5Ns=!cv7-7s!x(#PVHTF zNVg**GO|r+wnneW?(S-`J60(MtdbR&)>zOCZ`1wkVjV!Z`a41FejC6W%*E*+zJT_x zrd+Nu?2vGpLl-=VcHv{xD@$Nm17aVertB;hxBQ8G<>c_ncb2oqEoK{$KJ`0W1JbVH zZVt+>UptY0w&C8Vo2xZ%VWEk-o-3&=NtQU{iD$e!jqRe& z7CB_K1^op3jq7tKr;b=Z?FDIWqDrKxA7p0fVD1PezVXreHTu~|k5(dLWUX;=5M6|- zoi@l#7cxFD0k#)CKkie38pn)`N>^{4I)+s|gZMwN#Ds91>Wo;SN1Nd%I3IM`F4wTF z5!(^|$4&3k&2%faAFUmQIGdEpdj70I^J$E8rcyM+$50&UB21)pRKUhf#^y5FJ2Wi~ zYIujZHVD08gq*B?a^q>|#qJGYek3xab(iB7oTi3>cH|5|O+VGz3SQ3}&wzWfW#yn8 z!rNQWRq6U{%BK|BC>?o1EMA2qXOb(QF^#Xg{+eT3PU{oA1GQm0$MTZ}B*lTFF4-3Z zo2vU3LM@L67px=^>ur@knj@8}h$S{q*G>z@#ee-Xt`DjgeO!P=^ z0kE$k=<(+RP$oU{4zL`1|ETMt-D(3dG^VG^zo&NoLDBPM9O;C`b>-!~A4pu{*WmS* zJ+DpmE5FhI=flIo9r<3qb`E8Wrx#4nqEA*2>t{Pn)}~SO09fM9v83g#VO4AHu1DCM zSeQ^j+qySFl3e1?8_i`eb-M~`M0m&auKE7td7dCiUO7+K8y zd|glQb57?E4S1tMh~IjnEiPD-@8i_QAKI^6CT?;9`!)ZPH;mmD5tX+yezR7gm#VnQ zvDC~9q&kXv+lb2|Z^PL$n+0R-u@Kp`lw`{UhDlcR0une!@lj~V(?x2}YPK=`W z=90met@!bnY-f+`DVzSn<2%zrbHG8xAqaWawq;62^-tMZaweczRaAdt?5h2KU;(Sd zDj!)(T;)S*k+)wT%r*)hD!!hBsjPF0YCu~U_r*;Cw($h2{#4?i-5I3`c2b}O>D)@1 zu?TsC`|x<`{~IP~Mu{Ov4&UKe{>30KC#K(l^w>WeIz5=s6d`dxX3i8@@0w92I6m9UpWXm5;>6BuYS)8ENQlzz zK-t+h&!8O4t%!=aOq-*u7?rE{6-Rv7bC`J7efvq@Fr{%B4vlc6Vdx==g%jjt^8@-_x?c-(_bx!k8^;Er0F3Aqnj zhSwqR*KkRLh&Cy97y^)>jSJ;9?}Wl z3t)m?7O;t%8Tn$-V^Q0{;jF5vf;T-w<~|x+e+i-GP>qnjZHSM;LdYUF;$JFWrQSO# zdI+|2LBOv+gdiiLA0>ptJ(`8fEYFUj-vp;%rN 
zkx)u9MbGC;dqW^)kBSx*Kz8c=(P)Z5Uc9#)TnMVpgcqt{S+pA8C=-K~Mn$iS6Y}K} zUcTb!OWLTIsFddSSI$w&z^jed5}MGTQ$9>*QW+5wLA8$4^XIn0rLMzpG{E5h_Jc(l zt=|6(H~a(JY^r!LkW<%=G>wX};0b#rd&WgfgZv#Gg149!Fu ztpbyaR);~}!ly$Z(Qu1GhGEy^c_X}!=XkPr;l>%UqSe7c0pVeDhL34aEkqQTR4gJy zgwhrXr`DkPzXZ*B0{LF@eTM!+- z(f`Q0wxG>!+pVNO?S7X({nr#bx6n#;&yKEQp}6q)D5f>K92@E}fHGxyJUydOrFqOs zZr_+x>yA{pNo(tYfbw|_nC+QGunY5#6w~F0)dRA=kx9NH(NSG^0xdR6dsv3&`rq^P zU`Wy1@9N+Y6Fd|L9{GX!awkRZLMbcicu8J4v6FB{4w%WMT6g+uKuu{##dWX7ANup8 z`H6zKp|vke$W}b7axv=b&Gn}oY}9d(ZdI)*I-N7=VCY&lO^W5Ioy#`ARUVB){PHCy zm(vw^;sV;@y2i?2FC5cg5|3MB`vm!Tg)Rsmn|m2PC`jZ}a1t{hXoeItEyV3q+stGU zlLq`8;4jV7c<%t%NOwbHT~m6}0+ElK+HOE^A0Pc_(C+#5?H5t{_&JrO%B1pV2sAE% zX8KQ*W7=U*G$cUmr{PpFiP2e_QeJ$~%-CtgHG(hjqY6DNG=5}dfz0ez$9b9w8`}9dCld*I1v2#-TGD$GRdrtF7uJ=Ru7D)p#!G5-L9(w@zATMYJ-SNK^{ly2qJ0cz z;uBN_B<^z&$BdKPv}T4}G2xFOVy%H6G>3d?9XtIvbSzm&_tFc$)p})bJmYw*8TfOt zR2(ohBu(w7=3+;0U$wfpy@K+WXcevYemc=#R)&8^Yp=^0U4g?Z>Vq*^WK18oe8Zba z&q`h48AR|5y7sxCddiF*OieRhNH6NSF)P``pNii(#V)Q>r+(XG6mC2(LnAN1EjR)E zEd_7-$X|=;lho!lO#<`darB>WNuI3_@2|9x+Ce%^mEd37_<*^yOr(e41X*Hm!p@|W zJ*24eNDT*XDrk&dk+D2#;DjXysk$n6${h?;-Y&*tpx}B$d1bv=YATjZYyUaI|53@L zr}QJAm&BTLbKkC*W|c(?In9MZdR{dL40)z( z4W=4?n8sAPZpj;8_&Iy~%+Mh_O5H)DwQ>e+3d!mk$0oNNaDjSg9si2)YRJngsI zA*5GHSJ9E_>1?3OV8n6V?8+6hcbK3p9q0N8D8L0++06;;2mFB(Px5zDq zxB7_8S>q+Y2f&2%%vwkl%K1rK{*Rsg&YB};?P6hh;a;}Z6~-75lZu}vP>rEc%j3K1 zF(OaDvdEQJP8_8Cqvz>6E_)LL@4rPK{1e2!k*G28HD@Yj#Bg5E^{-o~Do92|r2pXb z!cuKuXl~wepEu}h^)8?Z%8dUTP%Vx9qb71jHE-%J@27jN&lA_N*ROCPb27Hsu}^7#_x=fl_K5Kj*0!2=32oP`mIm9u^Kn4IW|I=I8F&%9#RvC({V_iG+aT^ zY`8%lQ~1|ehR5zJV-_-|e{$v9fSSp_TxVt!!iN>itVNU5wHVwj_UuZ`AS;nd_$|{W z%!=;N!-71K%>Np`X)-&-o=-L_rFmkdJYH}=F0&F_PO5SI*n6g1;Vy6x4ugafBx-tu zrZh6f9)9|PE)%jgTcsHlBm#Z5#~XXUDGHz-9M)O>9=c~mE){4J@WKbdfJL_v7sM2S za8WdbXPh!6A}xG3ZUu!j_~%(I5X&b8_`^G|E`g$Br&#VBzrl>~RL%I`` z=|(6jrU0ypXxnf08@!=GLI4NK``TpudN06S8L7HpmEDLEW3A45py@X-97lV~fZb41 zr(7_KFZ(j#Pr8AVGvU|?{+wVx;-t%^VT`$d(Iaku;kzOA*vJQHXBYVKoBNn{?Y1`6 
zJ}`UYwt`3-q)^a;vfw=lU3eZ_?V=Y?dS~f*YrdQ~2p6G@4)Z~5F^Y%JK95$Y0a2rJ zz)!hRW+B5Z@_CE;O@?l~pw#ziLt^GngzeG;XVR$4;150Oq(-0d7Q#VBEc;>!HWk?GR2KcXVhS-o1!`t02t2UKhwI{c7*TV2?_?Im~lC{Nma@%0k)1 z*zZuZ>z??x{Vy+wv?VrJTO&XM4igR;Yn`~V2r=`j^t#GZ)r#DtC7-(=f4A*=TNzqFmX_l7DeJ!_S&jYy z4Z}eTmpO#jhk&bM3YcO(E*&AR)umneIp!iiD23*Du}(mM9Ome66{ zaj`Q>*K;6ItvYK7Y}sOdU--a-Fbn&r(_W>YDK*^w{*5|!zJ%xt(lZANV1{^D-)#Ib z)*oePc_^-m6d6N>A!1RfLBaNgT;PP??DB03p45h+Aqh#Z5ZE+oy}p+((@PF3UZSyU zVZbzib2*WG`E_5)Q%GLk?}yJ~4ynFQ0tsCu;)VTBZX0Tu{5e%9ST8?SsXbnGzhC~K zMu`QnjO6yFb}e5!a#kXId^F;-0b-^Xm%P979h6?{0Qui+;qEo^Wz+ro)wu`ytzxhb z6fT`>d4vCc+JaL&tL=p30lkuGPT6akQNnm(I~VZ{VMY zBx?<$q(+E5sI;^?O_pa~PA~gmJuuJ0qTJJ>1R1^yAZ(26Fr+X`8Em9or)nGv_p~{# z0ha^?={5neO3$Z#{Itx0$qD+8!yAe$D|TPYAwwHy`A})6?*Wyk&d%xWp|b6p(!xC}Q8dy=S&7J^ z91&VCb9Y;-#?|hj^Ji^lPi+ASmEW|Si)dRLp@!7SJulf-;@!Z~muINoz1?^LBtg{* zk%V5o;3GP1ethS!l1zScH4Bt{2b5_=gPdJ{HOGM(lI|d$L7TL#dKsjg;Gd^+x9n&K z!T`UxTH41;#R$JVm>^>TeDI}g&;37eT@u<{aUs-F%hLG4yaOs}t80Sv4XA9RIPmta zEr}kF2$YYP&w^dv@b<0qZG0xew$FB^!!W*8+%|R6p#q(Y2 z>Q)NNdJS4;eq(y?f5)P6d+nK1CwaNJ?Buc~@a-d-u&yG*u<*a+`r~@J!$$C2KI1GArwP%^|gd*itc)^34bE#AY6o zoDk2>k}`O<35k02PU199u2WJD>!pIg{q*ZpXd~#>0+*7*0qwk>g6b0rx(SH9f4}^ntp-DqhosVC-Eo-8X+?bfXhbY0pl0<0@rXW#e`Rz@LQ>D+*u4&vMDlUm)@ zBvEiWdu>&FDvW}B+wFd}!<%px^{#TFT=JN~dZpA=>ea7%ovK*K{Kw!#b-gaYXesnE%oZ>lh`{Hdzb|2)1YWd$k946QyXBUrbi z>ssKCN9eFnUn#L&H`0Ml6ZCt<cs;iOJ137F%(ezd4n*cFd9B_N~A)3`m?BP|}hK@bs`KsRwkbI2mOKVzxi8Yq&66 z4rEULw%V5;5H*6%V07>%Oj`>|nw7Qe1AfW#)7zTb6=j1zf5-wEQ~p1{iAy2cs#*YR z0Rv__L+V>o<2Z2X9DD66!?v*tZ2D4)qn`C_#?L==G-{ej4Tk zkZ{@j?=%hr>f7V>UMctffGRUj$TV>5eMVm(z*b1&WXSyL58{^oh^nIx7`oAAvtu@R z^xA7BTrsB6U;cL6MAFw^Lf>qhUoI8EqzNYnyu!7)h#N#@EiVXVwDk}x1eiT;VecIb z#&Fw3N{_cvE`Kovv<)GzN=Iwbd^d`2EEz8H(OUkU6{f5Y2zChg;fq@J+y4^f_gYX- z`3tj-W(ugZf7n$1v`SACL7xUhl=4T|t<(DxKz$5733$TvrlS38X*B3LT`clR@RKI{ z!X8XP;G$WSD$n#g$zmbBKRzEM@k0F2UF{X;FMl#F`2DWe+$(x1<=Fc4EkX>d`%Ekoq+3?_o_erJ83at~=+IHMmn-4}_wS zp}^@bSWbM}31{#KXuqr8@imfwqbcp!C?5mN1#KIbJqPafIe)E6Z}U+rWdsP2eBWDG 
zSP%kjeeK*7Q10Ag8_iqI#p9OmxlrFP9mh{*_iJj#sE0m-vSy&te@V)uJCijKyTNzZRNCBr4nY z7+eXiI|9~nyA{Xb{g#3~8RRC$x0vrW}*uhezwMFz z$#g%KGbrh~Qw3S5J&%}_-Aqdk`TLeP5 zKdz0HK%B(AiCQ?sI(_1(Lpl0+6z2P}T9eebw^dwo?~%Jhim1HH&r0%tKVZgv_$u*( z!o46rJsW!Szp_Mq`&qo#89&EmER1C&4Or-n&H0L+#f89XPS%ac-J971(&ckPyO63F zmFw5hukKRFCM6tzSh@+f3wd95Cj!l5|7pF%J^9r=^fh3s86-6H(K|#zY6}@=# zyy&D%iMCAahtMHJU;r#R;k!w{kwUu>yP#N$K$Z&`O;*8N-usF>6!BL~vWYei{Uq~k zIARz{X`fF5Qr8lqdiQJTO~nEKXc-DlUqL73ot zlTmB2n{Hav3M@Hdiy}~_YTrp8#g{IoG%R7RR_UrW)(O?1q&Sz2ut!cZoL0mzEACuC zP*s~;gv0W%3S4i?apZU9hAhrrmVwJQWakU+T`3e!t!ycbNovt(7MToWBq$46G@%>cBs|$8q1z-BuQ8+ChF{X3nF5@J6sG-TvQN zx6H76hCh3~yDSLklA;R}S1>E>E}uR=AG@Zq24-G=eoa@LLtlSFKa48mgu8{@Cy)QL zsz)VGR1zm=zM4_Ahw{ka(r36D3B!knz%mBdV1;67Lb6C}<(|$~Ucfw)dR_WU&C1i> zhk;Iv^l_^$@mq;rFd=t^4P3f5NpI@f7ht;NkT>-XAQzwzLW62|_474v_PhBA=e1$O zM|84+`koh`Fb~T!6H^`+=6rTsDupGeP}8v~*W)z3*mSq8=}7#PrY_G$lOeX4!>oMf zp3IHO(Bgl>Od^P~cQ|fQeZQvXRiFHAS#X*z)v&SX#2}fkHAj-GjHF6-j-W=Q2h$F- zwFMV_ot`!yq2ms}HWl9PWm4#pVU8j>a(hr$nyFPClL|>zjE)P<%Zg%*r!uxhUTF~+tD)CZkmbPP)su2T?%$`z6zo8U_k~UlbyPlxLc~urc!d* z`{%QXzqqJ@o*HF<4!zfMOWx981?*{x|AmeZ@%o~jAMFQOCO4j{qx}^{c@24)R6)2g z%{Y~|O}czhXJC)K^4?KMGne~iLC^8(p^bP&Gh3~HLN;5_@P3{zDaqc6A}aRx6xe1s z#xdvI7ZLZ0`##y=HNF>i^M%j6rXmwyoZ@MDn=oigvLf%!W!@9YA*D_0k(j=(9}Q2z zo%6LoNa`bj?M?|F;mT0$RscJ>YaRCKIGdKv=az*aNQRMEqpVaWrwUV1QERuEASzvk zj{~o>C-`UeS!BcV8}>xFPGMttNIgXm>-$`ZJH{b`7K$$@o@Bc|$khFuRbF%l9NI zl8_PnUvWz-w`kAl+b=i>YDG9L{NbrnY%JK>P!^175KI%@8^vmXs2EMhXm)W`xwWVQ zXVmMov5-XB+tuTGsJ(3=?3wpuJopO27=@2SXrHTuFA6R?24P|B(j^ZOrfdA9_H;hy z@Qq+F@o%JTEyC#vl-vJnKh8T_&-ccK^^4Wx#yFJy_@7-6+x36#g2t$t!y-qoG0Qz- zz`Yl1EDS55mkb+wsWL}<3z-zOd&TW~P)jO8weAm#t|WKaoQ+fX#CxLj=bva(LN=9$ z!4}6dHK75=LiI2R*f1F%cApAQGmoXhHj}qmQ&+(&Nj)}>4Cv7#Lb#T8ywn)yf`cX| zL`HRfVCEGfp&p*%A#Bsths{R5LeJG)m$s$i+QNu;1FG-BBMs@*b>IxiAbv?u-|W}z z668p2t|qT#e5%E6nO})wa-wDb?8SyQd3&5%~Ow|RU#pNo4ZgU9pQXfD| zzQC}YnJ1tz$&T#AE6I!3 zBzu&aZc^4J5S3TfJ#?+qkLQfb_2;6?MWk17!Tp6vRDGXU59dv$DQ%!F4_lj@k`!Ic 
z_2d7pO?E(M;5##({K66{e`8|!&76c5zAIopyz%=#0XRp?ZLHq&-L){oso(l@>g0cD z0W;Zq2L2@PXAeh5_6e$^o@(sYCxSn%vmtzVr7hwEk9l$P0=P zw`pPWe-#-b$az*=nKsq2X^8fwkE59U9X!PU)kpO7M_Biyp(cnc8i%FbHx6 zciEdn-dJXaBV+ZJPtPrmz8EIE(DFjq5F_@5BVoD!q8@INyy6}H${*DthA-(*t5l!c z`n|nhBu)Ng)5V$w_mP|=tbXM~DJMx4c)P3f?9m6hExhtmYdlqn*jW83juVW$m|3CQ zU6F0D`!LTub5eJ_Gs!b?`kN-tsUZlXvy5@74n!Lec5EuNZeH-&mket1Ufdc3ZOZDH zej2`fb^N%@AiemmUC=ILg!<9e_sct#1gizffe^R21fpX9P+!L&v2V;W49-$%F~}#2;S$5Dk_Iv&^)bN*i~N-> zy}W}mc_KYDF#cDbL9%9ZrElZU>OF-(#2pc0b{;?WikJytfgjUYS6Cc64EN#@6fmf& zDDctd<;ML6C==iLem`b2|JXbc+t@smtIDZIqvC#5h6i(&c2Xc_K!)0-y(X0Zd%|)vjpw@y7Sp_Gg2F+NND#?*49xX;L0rozMmi(4d1w%}3 zM4>|^KbV}2EoSTyX56AN58V?MA8?e}?J`kE8_v)Jax?Morlr=sGtCdk(}m)*}eolig?p-F{>V!=7c6^NEp=!#Prt zx?N>=i+=YHgVNw;*JX@zdKHy#4JBX9G|BUb3@ZwQ4-OC3H^PF(_yI2^97vM<^Rn{q zkC8QPJB#-11@pq|J>o@WdS+U|bKgSd6V-4T7zHpy87qNTT#>Uzo#LN-vgM9T`d+5( zQG0D5vzIdCUL2LjP__qt>=#Ym>IGWlRX$fpl$*7gLB}U;ZH7Ts zvti2R1?~b;d;NsJI7*Kyj!%lKZNip7zZ1*b5TfB%(MVpBW>|*-2ePlVcVRI+cVQJwX?v44nStQrSUWo}QJ>=)kW6(xck>wdQigEnM!C_t1{BRI@3 zHy!=D<`s{Q3M}(%t!b%K-Pgxo9hKSG$k)FAKr9`s1qlh_!(Tm!AE)NZ-;|7I>Nc$7dKPbld2ye@KDBz^j1}+%@nhK$-6!4vT=~NCH4Dz9zX4r z1Y=z?3iG1vlKDbCZkZ1?gi&B%to^PufKCL3;^8dl{&}ho*+Iy!`~Bwf>65$r`eG+) ztCSGqZE!^FWj`HXOLfTZFub{J=hq3{ZtbO1A>F@L1iBKI+p28dO>2GTnmW*0)I_>Y z2M~IQ34ZJvv4zzR^;I%W@)A_7)(%<_6#VK=?j#3ZZ~qtG%C3eG+rq%=8jRG@sulAj zdHt%K)wEd{Osp}nh)O|q3$ZCxUFztI&5WihVru@(I3KY@x}9L)!e|?B_+Jy9j@amb zMf7vXUe^a{g>7$&vOp0~gfSA*jAFWi0U^oO|xsVUw#WApaSRRDXPag&^qQVBqy5_NYA3N8p!F2njq+nHxd}D&P05 zpbU@UZWe*g8HHRNq9LGW00CvQraBvJ4zV`|)>LfAVrz;d4OE2GN4kFT;s{D|@~7c> z+$ip&Zbx$P$^bcio%9Owv{3Zb&GpgA52TU)wDNAMp~kKYoWG}XvfKdn{?3Si_gIAx z(jC;81o&|vX(lJbiCmnGf0F2)g!1hlF|e6-#Q65O+ie*6tzS4+)-<-Z+!{l84pJ{-DH1UIoww(XFb5f4~+1>m9?WlbE)ILm8vY1%7 z%F!Sf2H2x6Z@V;fAricuPe6(}C*!acQLiQUiZ(iI8*d{IMkEB(w3D?M|B7<{mNaTv z%t$!CbtmA(J_*NH)yhfdTEfrW$Las`XU7G@@;?Y+&wUi;+cwQ6` zt=O{DDmf6XLIYJq$TynHou@`2>02QO)l!5{XrpOYojknc>sLG7=1kf2gnh+fUNpch z^Z;Z$(Vh!gjjR)TQ9|fisbP0FvFFUa+!R}a&;=RTc9phf*E~_Wu2M;yHf}K$>6{9- 
zy}7NKes0W#3ymznh^xJtRwckT7jWhISb}a&Frasyz~r0>1NsObmq*d&E)@O^}h-DQioqh{afPx zSGTDD^-lKjpLSAghNR7E10xR)m@%wruYl@6@#YYq(jb?97BLcv5o-PiZ#UNh0Ey3s z{L#$tNxTy5zZ+7cZCpM^IP{dEUZs4cBkdTW4zwC24K*>oYqzyl_Ezf)`W)86wXW3B!!cn9!cN%{O4PDh2 zvJKe_j)vf3m2Qa!fc}<>zq-FFy}Wi5@kv00zF?=kJzfoGiWYA_+{(Hn93viq``T$- zjgUE3(Lh`p3QR)dAflfUfgXtOqamP}OFTKk3VDNVEon2az>3*aH2$V_YAY;7a(mi* zrO>yvz(u2K;h9oGL&+WeX1L~_u8d%|N?QNHn+0gP-BspU-QR`vza;;+e6&|T4O(RX z>zx$i|8>vKPWSwO7o{qTWhsN@eCOZt3SZmr7e`UUBU__{c0UMxkRnBx+ z^|;)qeO)Dg6GDl$?<_ z{tm9l=Ym7}wn+eD0}8>>l?{zik$oyFRqis!8rX(x*?wt02`? z|3r3@8ZLvLxTs;0u5BUlju_n%6&)@!Vc{8}Y{_*nt z<<0rKvZ_^{CfV1w*XJJww|5_J&VRTZ-2LPFvhs6-Kz-|PuWl|c?mpi9WAO9&yPv9o zv^A8rTy0B%`rG9{<^sBhS#yyUAOU{y@!dNGcJSX<*MGbEF!=HA?s{$JlAo%j4$qR5{yct;mGRW}Y>tp_SvPH1BFQ=qeu#2Z~gx zbLOySs4E}nUTaT2-9jsxx{cy3Y*~_|OUljSDXd)kQdgGL|2E11xj_FrIytF`|FqZt zc2bsRk(*x<{i5{5Cyk5xMNwkg;ZbWkzfwZYCEz_xVzV!6E1xus#dI{Tgt8$nC4m)pJtURw#D*wi1bp!r%y|f+thzrMR>{ezflCp1?#_e zR*wJP+voq^O<6JaV{#2~*%ezEnsJ_2aWc^=L)Rs))BQR(=$zH@d8E5iY`dgKSwOoq zhFxZstEeC5j`S)6zR7C#btBu6x)?DoA9PkC$^p8@Lhc|O35xVs>p1(vnD{DwRD|7oJO=JG4s?hE!{0gL6YzaB_UTZ2(2#Ka zMlL=q$Q0BmtMwd*UgH3TFxHV2UPTN>xN-RZH4f0vm=PLtboKUl#dB*Ee^ z2M^IuNp42&v!Vm&h$*??C{%@0aWs?AK$NerGI*jHjg=2Fllwjny(F0?@(34<3}XQv z3W&>S5<|&pH2ya_Jp4cf^bZfw-2^03#I?~#1X3H1hY4{fNg$TYrFuk}6_&+8KXFif z`z0Un<=0p9g0s9v9(7}7XH;zoiagNSA|Pql#zpp85kVz6ktob)Y~!I`@O|vkec6|N U*?IZ@0{{U3|52QDc zVQyr3R8em|NM&qo0PKBxm)kb7=lr|+6j(a<#Lilj)X$9W$=U5`JBjc3(YL#kIkR(S zc_6YRAtnJD0M)H={N4B9!G|bPq#mtqPlP}076}vz1)%DOLILEM`cvTDj|7LLD}c{{ z_b#WHOLvZ=@K3u){eHiHc5{?>Js8~2qYOevFC5=OIz*1-2{@Hmm>8mv(B!`IZOqw>1R%PT86TpEQ6d>9bP`}O9Wl%U z;pPur&6z4T8{jaW;z1=%4xbXjmub2_C=x8;KA@Ph-@un>%s5h{i4Q0OF-2^oe&P`ciJZSg9LB&q2uX&fQpRG~ z>)qYmWunyO1CW@6!bP`JB2-;=OhACJKq47+eWf+f9g$PyCqlA_{0o8(LNgpDpz@<@ z)7WnXdZWbTcBV|pAgyyk+`!a@S? 
z$tgr2>`S4By5A$w#60jqHesI1IRzZJF`aZi!u*c$Kny!wq&u_IMTU`{mx@+yF)6cO8akS z?9Ko{gZ)1_J~=Ae|C6)RgZ#E1CdMu*V#a=&$|Y0P1`vmAu524M?fVb|y;05UJX2ZJppl43l*BJw z^k$3%i-1wk%@b&puC5Jxs^U8ceHMY+G4_coo;+`#HkC4?<|6>jow~?A*b~(elgT?AtKNRkq~(UuaE|1Z z2}nUeA|ODcxi0jxlE|e64W_rJz`^Z2>vXi`^$PIHanLgqz!)c?RD#v{hf|ZBUaPf5m2cL+7Ygq4@pF%FeB7drdh>OLKI6q5&&MJ zQIfvCu`uhx_gO+Enqq-s;1LnZ6bf4c!3_9ZP8H(RHU7vriWmo^01aoM?p9N6+5t-|1)57@OQUCSHag z9JL4$J`M?;^kzpRkod^a+0-u^6I({*{7MJMpwc)BM(`pmcRoQFHo6&f~NO$wD>6O(xwlcBB{< zi+waBaObI+>_vL?j&UfU(}8;lyfEm^+P7xhE_82)1VE*l6NbHKK5*%cNeEhX#GK5q z1n(ov?ZZyUCWLyDeFUnq2r{9BPE-z~d<^CqY9VtuL`VI8Ur83mfCeaI@sfjDapSCpaT11e^eMq1854zp;#f29cK!y2w$3kA z4XPm`^*HDiJ^E`h0!~4KFiqCzT<8_BUK$!_f?nW1x69gB?{8FIg^$$gbqO5f;4 z41Z|fvsuxY6Dr5Z`48b(GVMFPuKx>yRU_6e50p@g4hWM=uNj&Ith46Q&3W!Bwig<^ zXh43~>mg;sod4w@$8**m(r@&FV@yD3rn9A^*=bRaSg4ygVgmFs2W7Jq?h5&RMg<9g zL(tUPkWJpIB9+4_cb2#dm8MzS=F)1NhY(iQq!%ZbjNxjYmUsT5?s&PBhMXIlYm z{f<5*OoC%Z-KbNK4 zt*q6!BieN5a#@XJsSk^dwCHk~{a=K!839P(kz$yP5w^|;lsO;JbJHA+kKJ1f$TZm( zNK8|Aaq6dTn$bkON-5jrapl}z@{71jP7&AoBMQ3T3XN+6;5hjkj_Cx@H{MJ1jZp6@ z#@aId)G27l7(!&MDA))2qrBqm)e=iT6qNPv|L;GMR`tgnRt?sA?QcrD*9>^;$OCx_%)dTt4UG_j#o%`tl zGDgpp)qSpj^EFK1{vDS7RL^3qI~~fpIz|RWaR9zR&qoRumS*@1^qex;mRi>AI%+Ro z6kDMsgB(cpK;EK)ObZ#Er8Y0)zhc%^`0Q1t^%Ooyo1xCrAZexF2WaJE_N?_h*DV~tBF`(Ntnt!!o~4_aRWKVyL$y;JFC`)nox%P9^gOT2olImVwFx zK5(<7bu@vo|3Y_6vG^;@Gnr=`Ifd*? 
zJ+n;nA^XBKM^)96p>TbLZ0mbxr~P6#Nz@d7=-zno!N+?6imHJZN=!J;EML`epYwh)eXj^{oi1p$hY zFzbw2Ri~TQSt*-^92M9DKGkF`uw;=%ny(aPvIrE$n1smrJ4;YN0;Gyr#uwreNJZ4n z^KMswL?!A!xUT!cMSml5$`Wb3W&-3(^V*L{Og zK*nR>K+EN41Z(<+qXoGreh||!wC?g`SsZptvVWL4PR3|O_bh_bucUk6J`RabWH?8J z2E_PVDbx8DY0X)J?l6^W_E{xVFV4bx6=^4NYdx7yU9&muS&iUvyFFbwZ_NgK8hu{5 ztzF?+>^U0|Sa7R3POaBW{oTC(MAfo zVr1}-UZL7xXv{&woEUSNZhYtEG_ud!kbZ=QKOh#749Fp;xcBKGr@Z6p*tX_qYysT# zzEex&Vhp*#A)>HE8*3=ov)ybD;H8e{LxF(9ZH9?La5}?u}ffsWall$5_3KMJv`AUCNog^we zS?y>qdr6wAST3=urh+FHISc;XntE#e!K-Sqv%tySsx{P?+^>sNCHp&$LOlvg>PZ2D z$5!*Tsloo|yFxvnGdAA{1~l9M()sVB!O6+d!T#?fr4FPzHMzWPY=(UqH#n<~U^^C5 zyvy=rWGm2x9%vh2hYgT}K0eYxND`?K{A*P+dS?RcI|v0t_-qFJgKD5=%QFh(dsTiUY`_shpzc<-yDp5%Z>DC;A;P}p|t??oZ znXgPLiXdWqo|aE2kwfH+<`Tr(tQb-rPHEROrRfklMEzP0e+C>QLH-51WNivK_KHL| zV(ph$NDeqc*q3CcL-i>5dOcWT?5v(Kdo;mIEHTf&_YA#t`!ZukGKoWd;u3`1h86zF z)3i#W=dq~dO+r0$uh<>kfwV#*q(XsSkkQ**Uv$pO`Zrgz)@0@dWsc24YcDSLkJx@MOYaQgm4=cI-h&CuV_Ozf<&t}TXG*(eS|F9K6zy+EMaP(NV1eqx*@lF)W-m@sd$RABEsYs_Tf z7hx0?hd{AcdTrYjK_n!W!eBbnBac1ce!5&}S5vR}UXYk`z}bw^C*vQl0q@~nqMt4| zoi4?0p>Qu%6ZbP!z8bu8NV`q#@t?8k58?kAoE?|_KgVZ>{olQ$Cy4*jTRQd+0ZqKj z8)~mP$l+iE(7U*LgDlh`bb?Z=R5AnBMzt{nW-kcG#LLMp8M_LjFeO5=3CB@8)1Wt- z>g>XG`m(ysKPJ?y^dF+-6XuOA{fCu6T1%Y}ek{E&m{sh5#FKifn!m1*A0}bKmvF)0 zsGU^Cw)hIj))F5)ya*29;_0UL_CF%Nz7Q37ci{Hjv+i%Rs2ejDHe!M%``(_TVMEI{?-=QnU!n$3e-)50FO2%X0Ml*Xpj5SZNID}LH&Y~h4pr?*t}FWJ7`;-uphN~MQ6ko zZL4=`?5HsZ1O10h+u8qi?tmuye|}V6{~w>7pC9c1KGIiW|DTHKUt#s-lmiZg7us#> z1~Y%Fcl1?eWJ|WS3gwEbtwPQIIEIMPO=}Ev>2>RT%Wwbz(iercw|zFd7wXb=b=Fq; zZa2tjr;rJAz;%ZBqVTSb;l9yft4+=KS)?zi+JOiRDfxBEA;R}N5J80RpJd~=fo~hy z|3Q1}-y;6wd{DOkr^ko*kG-U)ZvO{gq4oC)=CuI@9L&_gO#M;J)B{6duT*xQka*L& zZ%r{IBwnMR)4ENm=DM4ETT`m-KKXTpv>wtLX-)hWb2cU+D8t-76r{=jb9UY@8aws!a#eJP>^?ui7UfIRa|iKeHjmquz;b_%DL~pDONb)`tq7^m9wNWVnWxZ z^XszQ^9?EMIk6vTY1l)kIetQSz>e9ON$B?M7;Cw>t+L8iR`$aN6-M}evq}{b+ooaz zE;g&%0+WrZzf3_ingWhDg`}FBC0MS{X*Iyp8jy_dcgBzEnaw%Tclhp#6}@O{=H-Fl z*M|20-d*D;aD)9HoFA9Z{~VtV4)Gs*Nl)GW-@A`<6nJa%Y_T{ZE6_!}@PO=?Ut8GteH% 
z=l`As;Og3~A+QhXeE)`7ds^L_Vj6_SpzNlD1LoYkE!GA(Hq^I{RR->ydY8p0#FJJ!dlgTh$SG=Gh$q`A>ggKyQ&h+6Tb|6F6l$pInQzfCusleKzo#B-+!CBt5$sfow3yv*|(;fat1VDd@D!g)($DlL-8)9 zMBquIl3T$JWwg6HmMEiZv1NyW8ki+_!eJ+~nA+=q5vz$iVobI_`DIdr{_mglm(G73 z9Ubof+e>Y5t#|9v|laKGM^k|7M^)B^7Y1 zIcm}ZhVQ35@6*(N{>v%jRASN=_2;R{|I;6o=l{{s`N99Qm-GbZe>Tt_Y2M!!>Q85V z`lxOj$T}-*j9bKlBwV`ep~fXKa0m6C6nl2EK(NVeUCT+8r{wADr$c(!w1)m)5A+pM zgZ@A6FU5ZkPR|bhpS`4~sQ(`m^xdYF*WtYdwQpGU1PJ}HJkRHVB?PVa|8HzTWDBmjo3eZ-a} z^QBHt(c0OS%BwE#eQ)2bY8VF}$ZYv^<))2G>Fu$B^*2UqmJo2xcq@3c*c4rE`zsVT z@1Yyuvk>I^4G^Z_{#tN=ivO0WJ^q^km-k)srMmS{U?cuJF5mxfbUrw&|M!xfApR>3 zv`0dLzXF%;eU1XnZ*-}7s`mkKd2c=&WWVQF`W{l#5kNzogNPXw&_0yC#t%^DCMA5k zI+A_O8*V0MY>q1oyM=Mui8E~0~QQ1Bf6+E_h4SlC|AJ7Tf?=9<{l5oJa}f&)jU);=e7D++(8; z!0!*7*2RBohW_f*9RIiE|2sN9eE)4P>B-{1#|!-}L%(&Seg{uI=C_#Eo&Reb`^i$% z`tPW}H2+Wfhxxyk^knD%BOd#yS^Hfv7B(#W6w_Yz$?t|0-!Ez{w1ZD37OFOfBX{T9 zS2WvfZdVkO^SHLyt$nvjF0DVDUL|?7@wbv6qrv~3KmrSQ6|`*cCsoiZ7W{e|troV3 zP^8N{nPR>rs@gM!;)5HDWdr4&+hvQiEW&;b@US-{1fzv*JuEAO3 zBn5DoGtztHTO$@IC@iubxmLg7<8Q!V?2 zKFV!H`+ia>Txk8eJ@I``$QB(}^+C)6&lE4`m+bF23LDC-sLxs7RrjuL$4SYs?xbQXhf4{*PU(qa1 z7$Mp`toI1ok!vc`tl_o@|j?+SV;VNU0)SM5$lZTcVt;Z&P8~K=fWut%uRGE(qFj5 zG|0737dbr^%bqT5{&k8kyFR01GFf?Z528>HltjutYW71WxvKNJ3 zyQ)!HR#l}UIZ}vig_3VprztaPxzUX>S4wM!N;))U?9cr__g5MNq=VE51tG3D8^N$p zsp91)ki|y`sTKH&p0@wp!N}u`Nn-~kC(%tZ9+P|I^c;k~eCbUAhjRL_Mbf%$72T1^ zv(6!>G$oO9*9R`WF$p1c$Q5Y_e+o*(Ki}S7-7HZr8<=v5*kSCkGg&BlK$vTxp zfBbO$?&7wZYK0jH*<@lUdt=5UENcnhzIpfB`|-`|x34y=Lw`v|OQ}#5s;ukg;@#ET z*Vo?l#qCm$G*u&T6o4EHmU^OMew!Sn1$`~oyBQ-}tIO@D zP?=_((}8jHyh3VtJXSz+?ot-O3-r7uTg-wNbuT?vw~Z!w>M%sd`dyQWC*tCV(5*EZUnCMA+}P0sF7sxH zmre-1M^?WJ#KHhLl2c4k-#zNPeUu1Li?M`5cZB_qKm*}gI?n~zT^}**^(I74lacGQ zsP{1$0jE$RE*5h4N6J3bklJ=1^tEK*JEo*aLFbaMXZlkZ5Nt5E8j~q{kFlHv$|NWz~{Xgy>;{W!Nw)J$CEWaK`p3d$i zJghSJW=NH2SyTVvB9T*bCEn)dzPrN0?*#{N2nYvX{&M|B3_IQ8M6Hm|gkvgAbp8$;rkDJNu~K~?nQU`2>Wg< z;@3H9FHT|oT1{iMuGd&HNA7keU$YSYLiC+oTg>EG#4RT+XCc_7IgZIsoF(xJK0E0$ z{n~7#t9-ewr3hE+>MKrWB!mgP 
z7CsJjR}PUz*d2t%jE_hV0KJ(DUxv1M-+uS4dMC_CwGdCoYc9m?MfiS*&Q4E{i=jOc zp?S2p>bW6$mk4>C&_l#bf@80R0 zi(D?%xXgQZkx8`J5CLxqP3{*7YGM~u4K|U)aR>_F<1inMyOU61=f0b7vguzC#d7|N za5G3bGanYwzp`D(E(+Kky~8}XxO!tUsu3zAQ2DDSeHmgB6^!&_Yx(-rVE>g`?fQ&L z&cZmv6dvIFPyN%S?|%=@2ImL+zmK$kqf{JCOUkzH7`i0P%W`JR;%#yIve85TaGZZG zTPqtRzi6RCBIF)+=pHt;ng25ackAByP)v>f|E2GL_D_xu_kZmrZ7}~Y!$e5nTdDyw zsP;LDjiHZlER3b6LCgr1=2xa%>`xZhzZ6i-{^*5llK+S~yPxZy_DrD?msM{eaA8%4 zUKEsc$HW2&twYowcUbzS5*B;Eevp zMy5gbkM*dVVUurVnSedL)a;^ksZ)HKbAbNge5aXOUvzOS7F=nS-I?{DnE@L~&O+hF z%51)hI!XkWU5ZuYbgpJ%6_?LcQOmRC_ZqDbMyV1afq(Bx#=^>31NSj-l1`JBG?k_W zDy{CYs ze;f$V(B7{R46JI>j$uKea{~y${<*f09|ATs)Oi398v}?Pb^2pNjYE2ZsRjNUnQgqy z-N23UA7|&~?|%)>4)LFRNgMiqx8K@*LT=zaZXGYQI^RRC-9dhcIM)UB-G2T*nE!Xs zKP}Jy{>j-P{%bF3yY=7JYT(gkZ*dX3>fo0`K^~*w+>5d!h#NY(7voPiIT4aZ*FFKg zaY+9jsZsx{Mb^W{{|?U2PM6|;j}Q8PFKNf|zpGFFJVO@(u`&7(1HDm%(20Bc5>bEv zNv2O`bh`l&sz6uOhGWv5;gAGchoieGP(-MZSl@z)W(1?3A&izo>#F$8;AE6|2#T;|6?zytT{6^nGV;l zKvCA6y4>pemUQHgK;p$r`VE~Fukz`xe?w96Qk&92y6k3NW&X6*%#KYKUoGb~cSLg# zvM2^j1N2-U@YTVsWo>f@BX9gEpf6rD<Qv?9jLEvAtq;S}u==f)#pln@n;CvlVLWSoRzoj4cB?LE zUTrz63KpkZvsGU~REmaEp6Y}-?? 
zx0mLQ&bo`f7f)igydSsrX59AmE?kbgEWO3s?l-&I{-m;9#XEw6fS5bAytFelfNO!= z>WUU&TP4nUY%}`Rrgr$x-Xpgm3~bu}IbMqY9SqJ7`#*b0P53W$DL#1^c+dhHrH1)$ zoiEU}UYD-VgI$0Bf6zZaE`R^=u>ZTK^vv4viA2vspy+wX*vBMB&Z>#w?)$8vcL+;>*Gd0Wy8L@}rTximydF=2T^Yn*GE@-Ab>JGj}=J z+Mi2yBNcgt3{AfOQDSf*i{weMW`#cLei^R5@m`{D;GPI6lpHM01&*G>r!0$Z+5#u( zNlxTkr+-~uzY+*@KoJo_$qij;ZJN48k(>hZ88;t$3>52^*RRYc0bDyKx4 zm#*%{#^x1%JoeB%q#dLN{eK6eu3D0Ivl!Y9Fkln@JL{MA|KRNOaQ<^Ism-nBmH6;7 zJ2t%x4a@hG?*#y+ESS5^hv;`r=#J2U-MN2z{TJ{0!-v}+zU_VdUi5rpy+qNKW3RsO zv6LaWHpd&a!tCLg^k#!z9xC3eYeu?Y5znNo##*G8-+y6X#G4qfP>5v{h S{r>;}0RR8W*~(Y|OaTCr2Pkj= literal 0 HcmV?d00001 diff --git a/assets/rancher-kiali-server/rancher-kiali-server-crd-1.32.100.tgz b/assets/rancher-kiali-server/rancher-kiali-server-crd-1.32.100.tgz new file mode 100755 index 0000000000000000000000000000000000000000..02cac3b94d4587d12758562b2b9177d167909c43 GIT binary patch literal 614 zcmV-s0-60EiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PI%JZ`(Qy&e?wj(Ra2gCvN=UTha7Tyw_pKfZiFIjkU;W^yW|lyaWu z{Z~r4|CQ5XHpz0C=NI$od|pgsc2N}h1mww)e*D}uKN;x^i%?RZ?x*OOSVr@GclWrOv>c5pYE@~QT{VE-XK#vSD}h03Bh~( zFN*0N|BHDcNB(~V(lmX;Y4n;Nm~6L-V)Jk8G+>)08*Qr+uG+}1SyFUuP_&?XZFSGi zh8Qbk>@;MccH^vO7qneh80(e8u8UrbqHj2%T@pR1?$|oG*1taY;xgRX3lzYR$?db- z;_3Q+*XYRq9rO5KNAA76X#<+WN%js=+jVO&9CS$lqH>);ed*HjsHn5;6sk`%-Mzv# zL*r}AhD$qkRV8#vIvCgmT+?fds;R-DPypx2UVgo~n|*s@1{mbgt-h1uXX8Vcge2-B|a-ruUjm6?dV85Igxi%Kzo{<>K~QG}Y-n_8$MUT<*{RSyqhv{|NkZ zdDc zVQyr3R8em|NM&qo0PMYcbKEwvD87I5Q}h}6{A{IWsE1`ct8;SdD7Lbz-^Q{^QZ{!h zl}a!qW(F&gU;t3^Y}WU)-|7ZHf*j66kCk_`Vya>d2WT`J{RSGNSdq-Q8eLUmKGKF6 zKFYaKBGEf>qLpt(5sU|8=w}*&!IF#4oWJ zD>f7P|K?T3wVW8lBm%Isqb7S_{(iUVm9wJa+5f7FPh(!yEpvPkt);k@l}b2aaeD$> z`^DLh_g=<(yL-{+mX_b%vqJx0v#jF!0Sv%u{lE8e_tl%O{(rsur2ik~nPzg#GN<}Y z#_=xzt8L%$39B-5&Zj~fH9v&GAd0rYy#~u4Fp;W9MaEI1oGuc@O4Kf@;*rSNltxj! 
zlp1x}yw)H)xll?fp7yX~pLPK-Y>KL!ls7;-b&qLTl!hb8Y z0XYGdrUH#I%OJ@FFg2}kd@{g^EGA-#IP`GF7O^G&$nrAdhcFu10Y}rniE_k?L?R+X z7>zPH9Zf{Whrmq|BQ>*(2wY07#W=%xO*JH}z(t$Lsz?E+dk2F)&g6I}XXqj9q%zSv zNhU`1l;S*%_(!AIk(yfej5Jr*A|YWZr80-`Y+BFs#(^lO%}BEhdo)B!^05Ptm>MMny4agviLj=s|W2bJb<{`JvYP~7o8VNXM zFmgKrY#B823*bm~WJzUL@}lGkI`JBAW~kK_YPnWB&@)+ODT;|*&vXci|56FX0Xf0~ z?<_WgzMOH3x}1>Yb->z%k3D${X=ui{fR3w#c3j445_s=C4OHf&$}-DJJTEh5xOZj= z^f{k!#fyYUBnzTgE){YDSq!2ax3s;P@q&yEf+o|U35Md6MlUqV0Amyqc*FFS-l-MW zsFdj_k6i(8)tH1p4dE9-_(;kNU?vNxd@HA$wmd7(QO+2g$rLVlQYo}p z)^}Lq(Li2l6L2?zI$yKki7XOc8og5z6wz~ z4xA!09m4Z3p5F#WYRz&0;}D*wGPy#FZ@=^KJV6_PV{SHrr>eq;oHqcYP$>Q^vx-o+ z4upmMRu+bTG%cob>Q`08kv^5hxs;~;@()T@|PK2K?=%Z9tb!=_H(PSbD6kg2@TE)AL zTXUCJ&m=`V76@r-9DXNaZoC?SOPO)o_<@H;vtUN7NJpj~Vkc_;c8k0PM~%=sE5%au zDll##Pc}sbbJW$lNIb$5)u0lRI3;6RpE`ewLJmP5O|*p$OS_4W2iHx{T0K=2BUX}^ zPZJ5OU^?DG1m6j*D$2cmU8PfQ9F}l&%|wP=3yI9^>MxBt;p;g#!5~kb`M%Ls*c)RN zCkb)Xyr(eOW-AjkZDLx8Vv0@-*JyCP2ScdmtPEZ$S^QOwn-?kLxhymfy4jeKoc;P8 zFVa$q@Xbsr@i+7dvQnlLHYF_Ms%bzNY0aBAt${O<3!_*u4I1Q?A-DL3k7rU|wb-m_ zRU6RYmG$gNxr*?;OoLA)S6XPp3v*4{C&`$|mwz36Bh$2A^f4`Z>&xbYj}OBJ?j80; zCraTms_<(YVEj@^!$$W+PVl6uQ- z3B!zG$t+}$XbX;!P0`OpZnxlV4xwaPlYEax!B;+>^uk}emr>KG*SXk&G1L6@0Z??z zQ!8B#Dw>~~c87@8X&s?hjpN{vTsncGsLV%s3zFGz8%d+Y5;D%Nxed4| z*OSOXCqh=%Z%$?seQ6{EqKeimwn-EU<3xepqvjft8OGZ5uQD3VE64(PaV?ZAY?1@R zM#zLQ8FxOpLuClMN@h6xdr=5ns2D?|xvU_SaKnn$OoDDzVP|V>KqS%U5yh0CXQIF+ zEd49`xn{<59H8I{*P5yM(1y%Bm)CX`l1gQB7%RJk6T=leZ%F+-c0hAsw2iPJDXT-+ z+uhCW$DHSs{Cj=yUby)=McZoLY5j8ly&#r2Q}P-)g2C078r)t1&O7B3QE*_TAorxU z%54wZN5>Qv#|ZG7Fk^&7y<}?2joTt>cxo@+JBWXe47Z|@HH{>^7(!+fza0cE710jB ziqr>l@ZN?&{pnW2GmhZq9bhg?B^?$;gaf~C0;(RNuR;A$YI6v?G5y#6>dYsfo;F~9 zbI*$XAD<@CJEtEmk1t~L(cEo}b^HG>U+%T{fA)8GU%q+T|9On3C(nVl3ba5ySgR&8 zV+Jzv;C^zQv}oTpoExO&TgaH#QB79VAJigp>1{p023S?#dHj&JI|wZuW62YbcQ zNkK_s%DfHX*I(oTXgF4ZM^> zCmVr|$p_U{-V89Fqw#h*KM;n@qonr?G;J0t%W*>28yOk5{t$hSIIs{%k_q%`{OZKf zlcn11*+~EYG~*dYaj_}yaui^V{y*5?U(o+=UhX~V|HpW?;M|?b`t{e5cbx}z|Nkc! 
z1O4Q;AqFt|^obgZNv&S&2?Bo{toP(V-K|ujFcTR3r`{Rd2>=Fa-M^zP_+AvLywPw` zm8j*S=+|F&zKHx=rx|V7%+j=u?I^=W9yJ|bvuTZCgiXdglQ-^lka9zfb|ckQCRvKQ z$!&P)WX97f^Eo%Ec?dE3Vh7O-2~6KoMlne4X-nS`T=DrK48j9;1DkB9FtVV&_<~O+ zJTZqbIF%O;0eme>geTP@432-PST<-=YY7_KC01K&ta*OT)mv1-io@W)G4vVj4j_KZ zO1sT3xQ@{gZ~;*K2jOEOg42`KZcYmlTC_|0=~J|_QQxZL62rB~4{1AaA~J5~CI2c% zflg-p5Qyr;2*)2we+nbqXr$0Pj$FHjDoYG*LmAIMu{|P7`BKl#*s{)6{2h#{iUeN2 zq(4P|QB5Y|BMe4^{s~2I?Up91$3V|u4!DkVh7wp=(j?;Odv2jqdxK-(gfxtK!Ya){ z%e>{J(bOk`XQ}mXC^q*jD>S?q&xtJWPA{-6ZJL)n*^Z-=3Ai(s7KlqKx#Wf0bT#$S z7Qg?@2ziI?CRA>`pdLNrzpRPt+bnIc5%oR);fH55lf$nzU*G`GnkW`Zd~PXbJJ-0j z-rgF)Mt_d}Qx7`O?6|jKxs|fzS+OfMo$X?`KflrXwhtDZE?en#v)JwOV?zKySK2Sn z3sEFlm9BNEx0ivYd36ch8D!x87;FS&DK(EWG$=^GP?W+Hs!Q9X+-8OHDl=l#Gzzv4 zKosZ~j|KYJHi5XY7IR^PooI*f3klSK9;3CbdWhIzcsr4xbq}>Zw?p)A_H5+;I|nK_ zFLkR^!Rzur2X8v{ANw!&clV$C|HpWo20-_yK65%|iC4O^eG9)>5R7P`5x~L{bnUKRH;B#;_>vKDzBjFa!ikq^Wj zUv{{o1GasPvB9`i#k@FHr`yuesPxV8qdh<#6t@Hn6TEM5SWIrLCw*T+Bv^=ryh!Uf zZ<`-23Jd3b?NguBZEXv-p}9688-(Y8SM+R}Ep)aAhS&AQRtsJ0L&@uUyM}GD1uNR+ zp@pNab;q6~uEF3-J9fr{=FnX&ouj~oZkdJ66^?y)aosGdhkEwm-A|`S?~m&*+-AWy zO6G^bOR(u*7d0%rKVxQgSgRS*3rmN(I6Hd#4;as$??iBM%psr09 zWIx2_81yQbO{)zN4RfKZ;5N-R1YDoTUIn&pv?0)CN4i&yv8>OAL)M4HSB+6O+7ReE z*63r4C7^;+>Z>3I%{BzQ@~Hc&p;mU>aPZ(v^NNw0R-1xtpTb`WyWMot0c5%cENx?N z2CTmtU={TK9(Nq>RwHdZNKc`{%5i$yZ$8vI(3^q^iY8V;44T~zaHsNOgE2ZiHXbgl zV zZi+x}?aj(@dwXm;T(^{G^C%6Nz{^F}3SZP}S9sVjs0fXVd!K_2;+r%9> zP$|=|p1tr9z;^e$0FkB@#R=O>hi!_ImJQmp_-aR%X6Ncly+hqx1>sWALG44_s&eR# zP=7hXxc;kFxVZvEGj2byQY!Nm6-{(Pyw!8IEb*Qy@fRb#rf(2X<=EH;QymfojTC3; zha#K1%CepitG-~sU!fCFg1(EJ`*jxj*}PDb>{xn&5Cg6Sz|CDi@afax!q)a(i zNZJ;0>VW$!yeGvznYiW!*ZNG!F%RR=nK9)bxM_!`r6uExv&_u?*8XY{Hh?IE5iEPh zGd91#rAhT6yb2l^k#kv@p6_r)#H}&=hoIEm2&`+~!lX=H+trg{YkJhlwY}ZdxxDUu z7R^`-H(m4$xX^pk6r!)+4w8qyqPu69y7kJOp)H6FO4EOZ`eah?5z83<4n-GWsOtl8 z{pr(Ii#5o#O2(Jq);fi{4!!U|Y%g|UgH0D^Q7zUJ@O8tRsY73-8VZVicb8;+V1Pp8 zKli@fLp>Yq|28CWx1-%|8+hIMpWXd~&i?QI>o>bk`@fIztoKd2$9(F;JXZ7Gdoy>` z65^+Q-8=5<+6&%3lT&a}DhJ 
z`rnre^}h#C^%B2I%o~HV#g0$Md_JQ&_zj%a={?7>z@0U! z`|z7PocRdPX8K>I{^W%f{s;B{_xIlHchCRtKKcKT^DOHB_BAdDpyX&aKhBRF&~Vc>cc9<~%rU_MD91x1y*%w%Spfqhe0qS7n)Tdtti0 zimD%^y%W^k+2`yhT}L$*DP8U9E-+S78ki!WxIR{vi{cRW>ReaOKHBRzUj_GbKI)); zp8<*VjmWs&2=L_n4Q9~(wV%!Gzb;2X4d88K!S(fj3;Dl;gI7=X|1qAP`oHBqKwpL6 z{i*<;%KvJ|2!-snC4g0Cx8p|eY~e1#_B{q0+}9B%2zzfegrygQ{15EKd#~c%(e9uR z6S~x-zK`(`et2%($LBw`N`^XZ_(#JF?6049>LP+0JrPtpI1wD%)19kheMkoChv${% z^wRgV_j^a~5bzz#6F;}?KE!hu{eRmU@U{8BSKa!*mv7!Y<^LY#`ONzNsS5ld%)tFC z!S6A8Q>2aBbV-@^oumH;Db@as=<0vOvyuM4i_;%GYwExE_IDTdfA(KK-T(0@PoV!_ z@L%9rpNcoC;@j||kY?rm58Du4=EQ$ld3lKac`Vo556xZiH@$pw(#geHrcg_U>&OJq7L$XU?82H)qcl?b)+C8nkD3v}n&B;9`?!aC-*o zT8OUZuY)86`s{W!d<#LzAZ+Y-YVcz#Bq1}nqQb9^Onw+-LYu)51~<$kv%!!4Nb#sX z%iN1@Q~S4wUN0_98Q{Ig_635ABaa=N#o>Ly;C%-VY6%yIZ-PaH%gR#s;Sv-Y-W!{s z1o6)31oesc#iyxU+>9%lBF6iUytKS=5fT5bD5iZlEiHPyFN#e$16 zaPq#0-By?$M3BPb%gvZ2ti!zT=&LIaAdX0K7IPicIf6URVHoR~sEx$374~-MZP$D6*u4_Wn4vifcE;rB!EpUmSE( z0kpy3z8LrZqq;KPO^0Uik#q;T$=0C-=asKx%7eBja-TvR} zgKqwJ@70@wr~L0@Je~Y+?-rlS|8A0y+peX1(~Rz|QnyVm_8+@(_-~SF?M<8@E9KK#Yfy#(EKxAIwcL*vox<3{@5|AO2dw*c3j|9|zm zd;kCLo2T=CkMk_4?4|X7D&YF<6>xRx1fcJJRR61r7xYt%o^s85oP+(D#&%_KPtWJ| z2+J>I_1a~jqQSQ8OtY_bb6dp$EZ)iLOBig0^q}XLG4UI>UiH;7{)&mg%$RcEDPUwG zvqQLiduHEA)xDs$^;?^ZKe6K*6f)&!bt8L$$k{Sz?ZzEy8ais(y7*@KMM$j+hw3Rh zS-Hh5d+tRd1pi(iIsJ0?%e^Hi?pYN3uL1FM&Lm5}W*JJCIyrOtp!KeQEf@L?+U(!1 zTznv{a@)gRpIhrU&u#3#`^B$Yy8-Lszx{>yZ~x`q)A`Rwc{cv(wN;LS_nMzhael5J zq*=a%x2+Rg(S4~?vB`y99k|Qy?|Q1^e$3}K`agFj|AF=Y{{G8X3;O@y^^^X8jAyCy zALolMV`u~1*P@uliBw!_`ZfNpWpG~NQzZGBfAMGiGB>5wtAfl;I=0P3Cm8z;+)J_y z&^phcK5^Hau|USb_|m~#ze+P%NY#+bb_-s*+g%%Qt+0EesIbAz&Hb^E>zwO;viy(n zY-azBPK8e7HCOY|?TbOy*8d%J>;LxOym_ksc$BB7{;&5c@CPdWTd4WA+PB4EzctU` zpD|8#MG@iA4@w9Q25XMu(VHnZi(Oes1qS!8u-@$CQ;mo{pRDa4bkyX@glzuYP5kis=v(&zKR{oPMw*c+ZM7+Jb^lNu|JxQt^rS$WyoxW(NPU z9m9trn}aNHJB0tA6%|u+*x%hfSlVf3OnJDob8~YOGa4qA zYPyrzq4Z9K9WRg1-(SGd={tD);q=|f<;jQB3;5>4Ied34y_XM=C6BIODa zQq2yMYiJDx!Lb9z@QqZ2M_H*-YOWiq)(h^}XyBj>Xet_Bh;7?d-f%TUOTrU_NMLk= 
ze-F_hCalsN+c_xqE24k`>y+c-;kdd=W)86-T2Pw)cy6(01Usb6jX-Xa3SJ1YZI?&S zM2V0lVq)fK{1OEH;??edZqsmRnl0fypvs_kPgWWuHm*HHu?=Hh@QFwS%i7QbJewu| zi>wClLMr-84YtEoX9dyZTBH@iQV{Zo1HeCWl?YAl0#~jo%kh~N0$P>@R=c1)&j7_M z?{XSC2wW+i+HWUB@hco6m#Mv60|)U7Y=6}XzaCcx3TbTBI!|#OwVaq6WG|aof<&f# zSj!#(a$szOp_l)rILtZ-4eB2#bXi256?6Meab0C39}^{W$aykjg-Dne5TjV3u_f~i zMQ<|qaRLl1c?jIFJwXSfGtETir9gp_gq)k^lownvY|@%=sJ#+dTwCo;exQ}zoTq}( zvB_ZC-%EA1pcHPTx+0884D;t(mU3X%Ok`}FdHE0Y(h#*NGGM|S zE3sNHc3c>w3Nf}u+I>9#>P_lPg4114yDNfLo3HMFAPzi!Ocu&BorW( zn2LgBJ!`(GkG-Cnw8Vc1UGh2VAvgOUr@Nu|L>R?$CTjWQB~!#HNEez$&K1w*7%5y4 z;l`pMKEaTF+b_B(3|AAnwn0vYfsL$*x_}f?hs#N`>Tgk%I~(7-;$0c2MK&0(CZH2I zZ%b=D5LjzXh=<)SyC*5liW%EWs-?&e1EDae^<}}b%&To(jdNj~^6-8KaTj4u=s8iK zfk=-pxD|et$=t297NMhpTA7ANejoE0%O)Tv%bl0atUV0sIr-lUwf4SN%W?vANI=O# zBtzOwU>WhzjlzBfIUiNws1GOwA<=n5D5R6ojj#~?^>C#rs&!X5hAaY{AZH@O&Y93g z4?~kxyF&I>bseg3p>@U4gd}7mT?@PXX!%?U(fe8<(T9N=X>mb7ZKPW&bW&-u@-$*j z`qH_P-;>5`OcVd;iO`;_=f^}Ax)ezzE1k_DXX*-7pK81#?{d)%TBQ3$L2E>W?d3#N zR)bS%fI%p$@nBJeJN|V&Z7+dqU7nDDs2}srpfF<^#+(oA z!(kJtN@>kCI=q4SSw*xT!ynMC!eQUm)AybgT-cPV=hfa|CJ?hwD|432ARrK+3K`F> z7e{`bbkY@^MZd&cxh0;C`;sAB%wjufM^>^JJ(?Ka(2as zFk{!8ygZK(#nEyyK|fD|=2jCE|JFk$`jGq~jz^;)Wm6c_N;ifFI zxur7dE{B@CCCIi7CO4j2AR%>Yy;<;tYt57(>6|E0OnpSdg|~g7;J?t@z%p5IXAu*b zk3~^?_0+A~!B5CWxX#)d>72Yaa&CwlTncaNVmO)L`q#0LHo_oJ*Nb9=OT4lv!>>un zxxoEJW3X!9T1l-(L@=BN?Zt3qKZ^nwWb8&)!r)XhKD9=GnHu@VC+KL!RSJ%5f<;`r zP`iPXG>bF$)AH+)lc#EC+`3|IPV_F54@RA&_&}qPF=z6;S7ogOiYTt0=M|>C!iYKBJ zbU8}s`qt4QJ_{3NWFNYb#D9?)^<$2MF>T_eaiuYi!uZ`LbriCYlDR05Pi*w4gWPI} z-!u%I31U&xh?|?6O+obF(^aW$&&_I24}*|PB{{SuS2#^1r3LcW*DK*jzb|N%|;U;tq!1PnNm#)NY%s|GO#&9ak(z`B=w3Zp8U(Rr7 zGLK_J>xM!tuF@-U_^(izx0o!H>vjSP#H7(PXfmgb1zdzL)o_u@%=>ylmdnKkJye=> zB!wJ^Yp(2KnVC@O2q!tO*QSsv$AB3f3(iywmopoMxl5TkWDeGwoCh09*GWE>1;HzN zC~b6{Y>~#Aw{r8f*hhl@%(8upUJ^A?1-Y%1thWw8C-D_51p;yN z2vL>_Qh{D;%ggQY;G5K5&z6w|6YLeqNe?!|Mr*iGbES8p6Y3>e0Ou&!(BcOPy4Q)#eM zy?aCUWh`w_hGILV@D9xgp{A>nE6r1GH-bDv+j%E#$Y+u 
zfk&f<{<=kQAEg4v!?v+!T95It$vU|(Rq{JJ@7&|?BDrQXEwPG}09xWZQ7Glg`S*FsREI-c7AmLhKqu^Kf% zt4P61lLZ?#3+7~BH>kcqTscc--4HcwEsSn2?lwr;@f#dnz{$k`zCOA*x$wmO{^atH zAHKVU?~l&Uk4`U7jxXTDdAKeA;Tt$Q{R{l*gn zlQOTT=5S+4MFEjoRIzdbmnWCs9uML4!|CYc^qcdO(?1-)KR&%2!u#X%w|_i3y*&E* zlld|{6d961HY=p4?zJ3sqyacr%v-7U;`hOwG1WuXP_ zI?&FHjSAcB!pc&~QVDd=X~GkzC|64S*J#`zUuv_BT30#6DqdX+O`2WHMATujRqAe^ zm9nwnM(biQYkBX>7{0AZik-d{_IKdsF`S^~1%8dZi+ETVg@lZfoo2?Rng^*CzmsL8 zGGWS~;L}V@d6Dq#VZGZuY^8GR%+FeWe_fDnZmOi#^}dB4(f~`0rrq&= z`LNovWj>^Wv0t)G(7-MuKugPVHf`tdv9~`G&>RlXB~L_?WD`XqQu?LWq2pAS(-kc9 zK)r@eW(-Nh6)?yvsRmvonbcP4j}=Rw(K>~xdM2wZMHj!u!$k*y*$(z*QC9T4FzjQ! z5lTV5MIeUnIj~GiP~5h5c~Y;%sjcN3t;u7xVHdfXr8mZYYjw<M_ySaZoBj ziEGrs1GE+Golkf81<~Y8i+VraQO@tX*6Eoh$-z`I6W3l-Hk*k^wLkJKha-EI!G7)$MmrY#qWzL2>WEO>K$rimldP27vEOWW<>2G^ao5*&-c?$*4 z)#S%aL;@KrrWKoVn96IeitbT9mmX;xbiFvQ_+Rk^{J;0J;{5-I?@y1=SP z@zJ~Y$8nzCcZ@ai|MKOlh5Da2yHEE&KFYHNiz{z^KdMZQqLV@!mSxuI{Gq2YoASc^ z_yXN5eYmqT6=qhA=`ygU5_xB_-*y~DTU+o4Zs44k5>ASVjG~`^{uzT+nek|bNGSY@ zrD-3Qe#D^Q*$pma&wv5Is!Y+_|M};iqv$7ejDD!UYbN5$w^NyD`au8t2^;Nfllz38 zKY8`xre^#T&C8L^Q!$v=mL>1Y=owXr(^(yC`NR3~x5q~p$3LAOy+8i3XTm*mf*&Rs zo9Z7U4|Tr_bu^;j<6gi6I<+byetfaj`n+wA$s+{X+WDCis$m0YdV#IGV!OH{U~&i1)Xf^W^XBELu_3fm{A9O zt6WWfi)c04Yspr(+=OuDR53~iE+-tu4K){4)ZyXSCj`2FX13k>d2PWrO6H&qhG>0~ zAJGvWR@Czm#fXv(hf%Z_!w*w#tYQ$wME>|FlK0wy6s4 zer&=or;Yh-{vTJl;5Tml5iqUB+fa6*hzRtSHm1bid^cJr_?l0o;$EkSV!DKOqA*JC z(4apN9k-GO-pI_jMR_zPcq386+m7K=75I+%T#yaw!shxTv!A zOy0mX-4?PLqgV#1^NN|4@N*Z^&o(b2v?bzm4L_v3>gZ<(P2mzH~d6 epWEC%Jx|Zm^Yr}FJpV5M0RR839yw$H`Tzh`S^8H1 literal 0 HcmV?d00001 diff --git a/assets/rancher-logging/rancher-logging-3.9.400.tgz b/assets/rancher-logging/rancher-logging-3.9.400.tgz new file mode 100755 index 0000000000000000000000000000000000000000..1ea7dc7d3af05273a2aa7b923a1a5a067b805105 GIT binary patch literal 10374 zcmV;1D0$Z(iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PKDHbKADk==t^Vuh=X1yN&%>l;7#6JH0chlbn+|d3fx!XU?P@ 
z8zNT{YLH+7P>!1P{_S`0;8UbT`IY7zVLFXP0*l3BvAbC8f`Y`s6h)u$$%Mv}!ReF; z<#En|Brvg)F5=b1vm@`G0({zmflsQMTYYj!DWC*qMhh7uh22 z^ISG;!TTwdKqV0P*RwaT`(rL5q7)i&gu23W3>XmzGa{&#KoS&(5kjKnMhG-k9OgU~ zpfE}pQ7C=S6PPd>kW-#&;kfU4G$Iq!nn+;6r4}IbTwDwyDg+l8_DjOMUq{vhP-K#c zQGg_6?2I!S%!lx5{FbXTff8fodH7KY@=Q$R5I+23dp=7UyTCx8vJbVmdN)HMXoy4T z^}KP)*uukHEmfX+TU&6(LkLo-cm#}$kV$WA%S(9p+WZ*8r$0Qetb)yhEltdX@wpM= zkV-4S7bW)AmKO*l3QtWJ=O}q9v~nC1CQ(z@KCo~Bw&0%#sZ0rDa}XHtD8e{|m@7S3 zGQoh3=MXxE$Xgq&qc|`mGX$j>foMhucq{|TQ)xg4Q%(ak{G+D28x+3f8p0$88f#P~ zN(iPT4(z`|Nf2oaJO+$sRPZ>$SV_2{Y6^^M6>4DTsO*AFgQ@QR4UOq_Izk^_jWu6E z$l->>m64R1rLi5YGRTcNWBkTy>@8q4)>TOOYbwqANOurpfKn1M-!W27Rin419FXka$ zpf(D)&`mTcM2vb>Q9j^us7IB4^gP9xW|z#s>%%q9GwaD#^#A~})iZ?Ni{DbhdgiH- z2}9@^SoRT)#~7#~yyX|c6vLFE=aKQ4##GHM`)Uiqjza$hj7o(uBwQ$n(`aOjK>ce7 z-|c<3XDVKxm}%w-(Ey@6HhlD3iehe|NL#~$?60B0M$Ho(!l^ahUY(g|LH0`{8KZf& z1<#6ArdgyBG(deV+O~GJE@{R}%UO#`ln6aBB0&!%Dn;Xo1Y;)KrIAP_kVM%^lRmuU z0(i{k&Y(+V0yH+2`3wakb0cQuF5FCMF!i?da$;#{`7v9tnSeMwmtf^WxC;_(rQU?` z5n+BJlN>1@-gCg%*o(T$Z0%yKs6cPaF45hr%809}9hQZnmqi8a6C)cUGB?~z1;RKL zYKp?F`H58tW(8@u+rFn^>4~%*ghNmw)f7!+V&^x1;UwCY<`rv7p7L1XM+H;PLJ0{E z`wVBua#ItCQi6_Bzz$ z28p2DV8V!$Z%Kq^y!D56sTErc3;5J9JrFn(JVG_aR2G1pjCo_*03ZE|fhy#Qd)~-) zMlyt<5ln=YbT=5mtOO0t(Z~SJPsVW2Z$#~x7gDL=p3MSBAJ0-Ne9b3kL`{d#8;CU4 zo2$XEJQcB)!(ORpyTC7;rqNc?HMU|rW+}#MM3t=p+N`@YYh(yL8jpF;J&!QD9Hrwi zic6!lhR{2H^Fz;qKWKq}eJERd&bjvz;|Yzm25|$`K*@DmOKg2JX&I-r+N|rsETBt) zjFS-EL@u==f2C1G!@1;f-!_g&(zorca?xhwY|lUPwU(-pb17t7H~VKqRRM3`Kk@e~ z@Ge6frIY)B+BcAL*kK2lWlP$%Dvf>L-{1F-`-CMl#{T|+2cQiPGQn6~)(E0)j@}K8 zLw+N7O9n$z_e%(q>(5+qk|besEs}Ajm2aWtcAXZ~zh)SRTvSw`V->APdu?l|r*|%= zS~=-f%5+SOg(DZb1Lp<9&^PASGff4qqNWeG#iBlopm>5Uk z6d@)N0v{K9dEayO`qqbo&|7dK>qb537uAf^_w9+Z-qeBrlRbSXJ6Pef7I<+0Cx_-w z8eOF0G5rX=egO>Q(EM!?&9{MBALEpZ{q(Rr_|P*@lBMLnAh+3E3s;(0!h`B3;MfpAo|zeS%JOp>D8Z~ z#-4MAUWAH-L=oev)5@Fj_&tqKDiS3_h*QQqfLvc?+_%)SHOfb%3{AipNvJuW;8CLGuBN3<% zIKx;;$3_h#Gp}2b=}IAznCY(p4+%y*mgt7=!jbadQW1sYl*z~77l&aV@T0<5FSjOy 
zO<+*8lqC0sPz8@Z#Hen#xTf)>b*^YU5h&#&@JSO@&lf#Bz25fymV0QkD|)MQu{VT8R*56HjSwKW}#{}zVfq$b`278VK@te zD53@YfD7c(4|udV1}FYp?T4prt43`luQtg>G!An%C_Q&_tX?B~b+1F&*wkNt>16*q zR=VvrVAc80)2Frg?}PpQgM*Fz{}^S3{eL&}{|C!Am0iFsZ2dgG(p&1A%6wuzd_)H$CfjgKx45sBGg*ux#9OU%s@j|6-?kS1GW{|95bFRQLZK zKHd2L9;K9(J4q5*=<(%@XcRgrpgR=bC407c@%;L@S$tdNXxL@~$EcxFQ0r@rqHHpLa$l-YSvAH1>F?YTEc~@LCR)hs#H>WQJTCn?hfy>zI0{ZWZhY-zl>IiscV@(Ra_R+ zf*SRzhzHL{07;)m3P^1Z(%u9`u)w zxcsKKoqDV#zU5a|KrP$t_J)!4^FFFJQYOL4WYIn;H?o$K%Nk2k^`20cq4-`m?vZPm z8CF^9>5K@IdP;sz1)5A{hN^E5-F(I;y(YIi?S|2Ez4MKv?E~8~tmF#d~3{j?rV-n1LHJ zaAO8;%)pHq`0rr`cCh~@*Z9Ep-_b(+-@)M~{^wCjSNm^rjgM&mO|Efc|2?ezH@U_( z`|mm(VLuW4<9y@w+n9SBb8lnrZOpxY6LW8&bg=)f5Az@W>qDuPYAk{^Ou?1$pC`5W z&x6BF{P!ajCq+mcuC@WM4+|gQ6a1}PgZf>t(6jv0+kSX6^7?S&Mtop1^7^pVji_fo zw;xS}-cKeE{f$XA&%pE{`)EPYvWWN#Q7Rg{JE!vs#ry2`>6%XZQ|ZM&k)`TJ@u2qV zJ@nDZYF)pdjqSRzUGL3y?ZkffV8Px~KOOAYd+zB=ShJh5Zn=g1@2>Liz5%#}{eO6L zxUv5qqkIATKl>EKW&`kVz5ytQ3vBHFjs3r||2Ovk#{S>f{~P=N&r>?s|Kb`4`4yVC zi2zw)|L>oi9M|JNHut|iQt2B1A==|VikqJdle{5GN9$aeD33eY-B~Ba2gQtj8p3BI zKR#T%IREL@>5E@-gYh+=T)sBniOfn#Q7s*PVU*^GG13eSJ2^$!Ex zM?W0de~YUDOd1&a;V5tW2BXU#((&bM8c}siO4h$TSS6^M)^IcCKOGIS&&UZInKszb zQ{qVc?#K5P8ZT0D%*726VKET%Vb*fl$>05*OI2;%#K@V-YuoGjauyWh;-Ut!_`;hm z=cvTIL{)&7=9^9rqX0X~|NNfZ2ax|K$HxZ?^8e|^|MMv2mh#{1X$$F~H?yVB8Wb-E zJ$RCR3Z|E@Un_+cJ`T|Efw%3tvZ>usWZ3coo31G8wWypvnM?#uv|8y^okI&Pvh~}` z{W~k~2wJM`N?otK)@Q9j0CXLQty{k{Xbr5(_v0+TJ0=(NULor4erHUpOKai!m_KhjUFH>*|WcvTs0hfL#;igsY1K2;V?N!gWw6g8Hh<x1F8+NF`d+2$#i5PwnJ#%?Yr+6F5Vgf4L812I(04ICABL|Y_Xq&7uR^n z_(UKHQCzATHkHeNEGINDaD6tXTw3Z?dQ8^f6N_6e-QJ1Q>=Nc`&y6gwto!Ah)wSD3 z#=$62^XF9LAH1$1T4jkK+XGawTrSPBb=h=Z%o-B#Aswv%ot&+=yj-V&D?53LZhJLR zrN8eM(9D3c_njHH=7DSb zXhG@n_Uz*Wx1>hS(Qtn>#_Q6U@$G?TGQL?qhx|6ySW{n(}mies`2Qnl4`?X`C6Y%|%?dMqu| z*w-n;A)Kf2Gx;pGU+}Q?^*+E{9Biz^+;J~-%^zj{f&Y5*%4d-m-=;~zFv3_7R$k5v z6-!1zzffxS(<4qHmOdtA{>=3AS9Py(xBdUYogBcc{J#fBhxPsc$tM5rampI||AU8c z03STG2e{^PU33S!aFpdv*@Go+$bFWx-A{PFFp_up#-`r0@~;X5073iN){>dS@GvxTBx8bztz29^1B&tn0fC|CFBFKe;;*)^IM@oUmQJ 
zu63vK9nFt&Z|;5VT|<~<^MLdyA`(n#jC~q5_R#vev6C)gSzn{m%|2Mi;ki_lwED2` z$3c70TL8SsC@cKacVyE3GWJ>OnXmEW{kuEwsJ;Fqj?b1+JcJ8$2`hgE&d@eOJ)m2( zrOSJ9khZ{na98O)*?g;=rW<$TlG1hmm-%DN#1)ofr`_PH{IBEv`u+b$hX)-j3eKQ{}jY@^lFuu9QbJK`F5Zh0v~IjAc)vzqg>n(MabYH_j5`sudO zt7eKcBq^-yC!ddpO)^)N_)ZQruCZ&x=tX`7t1O-|Y-CvB6Hw#i8=N*DW|b9JW+ zfLGZ6PmlH%zW;D^bhNSmAESJ+K>qvRH2jHgdT_esN#5NJjYEDTUs%o31K-(BTFgG1 zU~Ya)y+Z z{=J+T=^>esUv@t|YHG81j+B-1iC4dzuAo?DRL%^jm=VQ%`HB^{qZCeU|p%-v$)2ef3Do@(YiNn*qts0E|%xHX^*?bQ~f(Z-Xr@#Jhg zIU7&T#*=ezo}4ee4gX@L-T$N4%dI~5v`|*%e;n`C?*H6B+1uave;%cL`c%LE(}b+l zO;Gc&CPD8dIz+>)`(73tIqhDWyq;A>9sr4BuF6tWVi52s@iSrX6P8Sgf1U4reaZ(e zvF^<7q-_urd}}Skfm3_CB8?S|F#G*&ckr%73H%V?+Xi^|;+k3!(voDsGGQ&TaA_1x zV<60%R4Nn2@|MTCx8`V@yq0}N;t7`QG|Kwde&CuL}>Z;sIwvcz=49xo-`@U8QPf3rUiig6S|jm6>URL1U>%+;o!hwF|jT zX)P<|{~I3bo%>zXz$*EFvRA+VbMN3_WB)%&vG#u^u_8O!G@kgi>2I<7OAP`pkV`+{ z(V(tK9FVV7pDZ{b+R0Qp(lfxyu7(Cz#8EAZq(dU(x+8Gj4vfQu(>Ti&NU9phWy(aT z1Px@f=cRqnuGu-7?rA*9hi%`KUfh=(s2v%xh=u6f|50QAIXhh}PD>@a6uPHF(}EM`F7A9-dJZ|;RUYc$k--liRfR6Y7^ z7*DIEWwnxfxMb!gsPexVmBo)6ZLa-?O8NHEF8^nQ(NHsi@e48555yYZ{41B$=YI?F z-v^ug&&MfS&H$V!c)~D-Cyev!G=bhSY1e~ZNz(P8r~ljDhCS&2@rSo%W0J-^jYAlI z44ijUXN^hsZ-*)x>TEnVRMXQ4nJZ(a^{jyknbGZxO z;72NziA{4bD;Pb&-|{MEXcbHZ{+3dKA@t6(zfRAeOCSOvqEc#l!I|+{gOgaGrU(HS z0%gL@Ef2a01XCgujBw0FURA$SQ!4FC->~$`;gx=T>1BND(p~=FejBiI|9`aL|2;ZB z*vS9KC|hgA#BLRF%TK06UZmqO{Rq8Mo=~@3pi1M;=|fvpIj{uPD3uCguFUc@jzejB z8^X7PZ#DMcmQen-SnVZuz}A0;YoNaV{VB4DPGTU<*W*7APd4$Nk5jy@EXfu!XE~f|8`$$c)L;DaWU5pm zhl9a{s%bhhXT7C8%AoFc--b6NCKC)Hz*q^w=qG8Ds$| zz~jvE<9puL7AW?ALNWBbtE;OKkyGy*&|1r+F8~QcDB;wXii{XShItY4(l@5QtZcFZ z9#w#)iMD{VN7=Sw5&Xi)HEIUcSFsX2Oam?RJnwzJ#efm#N-9AT2?TOdzq>uDLrfT- zN1C2I)-a3gP7AFvdY|&`jHr-GyZ=G>@ONZU(YYSBFQ?m?4uHtar<;Z|fyNq>RH5{| zJ`_E|zW?2STgmgD9sf~rJHJ~prG z>{&uUO_7RxDra^V<$0f>v7BX~n_suI0(g!x5Y%K{c4+wAVN!YD}@v#A_w(n>dUNg|yvdNqE_)tNwvv0CiPV59~6yBP{WL+nVV&yY2+I)smb zBuS@+SIfIF7-c~XYhYEvNVo2eRyU0DXo>D9)o5fdF}XjCZt|ch4Y3(nlF z%!Soh%YEX3xkxC$M9G0m6iay6&oZX^?%8%`Ao`9}&b(Siszgw3?h0T;O3fyn7(8Q` 
zh1M$?UVZvw8GM5g8INg9RTlwS0Dj9uglDehIt{yEl+Wr~%H-_F!5fUUTFcrSra-CW zzC;*M?Ugep!i8IVrlc5O|Jb8YA3HX)rbDS#$1;hxS-X z22%`Eh9PLnx{Of=8YMimA;0U)n!_kC+Y}dGVnpSF4Vh>p*TblkS=oKo97d(K9q?I$ zMiiG%hd1_waLm_|ZMj?vEcC&4M{3=!Xj1h=DG9STbRxc& z=WOOXSi4O} zTUObWYr7GR4l~*3P-Lc%Rpzl3n3Po&=3)p-Y2>k2W zo7ct@Z~X`|!GMmb>)-Bg4R_U?4$j|@2tK2swtuD?M9r~akM`ju7l0p0lrUV!Z`2=W z`|CN0*6;!TU)b4sPm`UUA^eM{2Gdkx#Rc=B1o)RHr61dMSJ}&wqx5CrD4kJ~NmT1> zNCeKzd4+Wqgd8R8?8IE{?BqkdaxJb31+TdCsI+xda>PQ_R~JEI>68s_!KIn8*V?Yl zff9&=i0Ezy)@i#?Cp*8`cC-AIxQR2gb9eEBI*$6e=Y53(X+yi)({AH@>*jnbUG~@A znsoQ2!u!`3#)+H8G_c3XBUCqtaoKlUZfN4wmO#!hawkz2=E{d|`$+@;!YQiI-4yY4EPMiji3sFq>4vG(ZC)m=q7Zm2Z}Jk7&BLW4e1e8 zUl-bPN(6@gz>Ztx&v0QHz%|bE>5k2=>xrATeol$)O2ZEtHHQO+zbrsD&b$0pxXxVp zIaAH$L(6VM>B5f%MsZhvS$RhD=9NR2l2fnR(3~?7S)2k_E7maT?4bS(`R%jCRi|AX z)SuyM-#_v9_m(?=>L_*a<}X8OsW(6GuJql;tCbja@R>Jb)WK)o%~PJ;nYM>XY~xu12YxkNC$H zl$K$%Z2CH2)I5pn&0537pKViD;iGz^WmBgY+n#^mA9R|vCX8C9Zas`jQ}-FJex*@F z!@1<~z&4IZ(ziE^*4J7SMlDm<6{CfzTW{9fzh|>Rt9GB5TSwn$vuB}Ufl-1&QmHTw z(D4-5h7cKpX`$iOhjS$1KeZkAE)KBnFkgmI?k)1O^8sHfF31G0mSZI8@2!gBGh7|) z|BbFVC~LvUb>DaYSbFwhMfFzWSs%Vh6Y6!lq@di*Cz^wJ<V z|9PbU*~EW7LfL|I%K`bfR?0z&RKkcz41tfwnP((Tq!LIX`0#_VU7$U7vF7)&{(T-} zALonyoI&QhGcjn+ap{8Ga`$GjJt4780Lyl(<~Tq?Doqd?FJjM(OMv0W97IABWHd%; zPqPV(@dhN-m9mqwLZ)eC1e)VKb;E(-44H8fIcSycVa`+AH)9wm6IaIgL;{V0$eDL_j6|gZcR}D7g?__R0UtDh3-V`Q z=*UT*x#njF{>K5v*%avC$AfL_G|giz`ev*pzj84yGp3d??7As%L60VVJ9C%G{I{8w zIL1I>Xt6g5-I_Kznd~DMP-h=@!zl)g6%~2ZLJg&?g95eYF3+EvpwdUjOOKd{mM2%GG}xcGV`>Z$K6b&_q+zUPOh}cM)}%U- z-2H2?$2iP+LQT2E(lE8iWjeT7eX5QMa>>TCHkJ knk0YWa)G`|yLMAHWm7g~QRV|C!4YSpX^l094zm1poj5 literal 0 HcmV?d00001 diff --git a/assets/rancher-logging/rancher-logging-crd-3.9.400.tgz b/assets/rancher-logging/rancher-logging-crd-3.9.400.tgz new file mode 100755 index 0000000000000000000000000000000000000000..88373cbd97536befced8b645f145c0008cf721e9 GIT binary patch literal 36673 zcma&NV{m2P6Zae2wylY6+qN^w#J24TCbn~8PHbah+qQW#zxh8^&wX?2?iYP_^~PCM 
zd*QRXzfBkgg$DHZ_@oA+HIh_jG?A2Jm-XOcGiFg|G*w}<)K=kQS5Q}Hm(#GeGO{)G zP*t+ylQgro0Xpl@al+-Ap<4R$fsSr)wi?QNF?KipFa{mBo_${JmHXLPMr|sW~V#R;x=&N@8^6e5s zc6AZan!Ixu#BQVAT;q zM#5J{`!;R4z~8j(2vpc^3y2H+v<5FWLig5y^IJpC@?gYJFNk4v~R@{N&o2|2le*jp4(*VC!CUJUZUmj^(#s#&l}V#x!IY zPA?Fok)y3#z%Pvk$uIv5!q5TI%)fWR{i@C*d>|IU=9F5rVDi2dc!>)|?(nf@yNb~% z0%PQwmMWO9^iTFE(YB0xeJRpg4`i6DSn~b7IDh|<6FS-IG2wN=yI5}1NlUR{DpGwR#eHFQbQ+K~ISB8l>pt`D2LcvJth2jdMVcKj)eC+<5rh*>#=cgu5rV0^;>1At4$w!zWDRL^TfX* zfF4FzzWNzDdhyX-v_KyXUloLniE)=GfC1iI!~L?$`U{wboYs8E_quVdp-H!?oULdJ zMH=4ew_R~(wq}k*$a~<2&-9N!@g{ijQf?57jLz`wMhmJuSS1- z=D9{?O}l}6yi4qWpXkzhBs-+^vdSPC_%=JUA!1h0@{ShEv@i+e&XK`0o$mX4PB3Bsv z*xv~ic^;rTvYlc2%duHUkoGQQc92=$+V)pmj;*)LbvQ3Ivg@(l@+53&t^ms>Bsk6V=qj*=o)PdOW7U_nDGqU5b8c!l(|Q>Y*=LYS!d zFZfdfcVOY-k6X7zO5+2p_S%QVClxaRmL6D<^jX3M27X>jJt8-c$Jdy*>U>st}liYq*JVB?Y7EPh(` z{MK-DCY)jW&hJGPb1gp^GUOC|ko(mCBR&VPQD+5R#{k|nFI&Rrb`MJ3Mbp;jG$y;U zj2&+#N}ivVL-t9@GgDK9U^hw*0p8Jw`YUe9j`YuL+7F58kBUa+$5aTm!u%o zi<3{CDm66R?iG;Cv(<@WbB>PC{vLAA-Cb6g6{-ch(zBEAaEKvQ)nqb3*=~r?3&dX? 
z1^oTc>nYfkAC=?YG1i(FMu`5XLsS_Gg)o%H`_4ZvSXSr25iSpRfDLAc*15oR@E!Wx zo8n=Ql(1FN)14hN5>l$KAs_;Gs7s}0r)jUwt!9(Tsz{Ce`?;H)GwwHx3O`Xy`Xm;F zUcje^tkL=z&pA(+hHluP4Pd~7uni9Ow}^7k7Oic`!#6`t$R+7H2O(o{ljle+DuDWr z_@-bs$*@O3WW&JFSMeYZoGfI%xwIKT*;5#mYS3cC7V{rkDMDw%XPC7=fnwb-50l$w z`rIhx#`%O6*-RIBX6r|aD6lVs-Ror?0Wj(@K2(sI9_DKW_^Aey1;2atwaDhULx+8s z@C^NHN~q{KmWE=`K$$X$L2SAMHilyLkj{jnd+;Tnr}e-x1K;`Mai^-^BiK2-4MdRI zav)I|Kg+zM*&(i_#9ybj<|c_MDk~im9k()?G=BD>&_8Tb2uVcIc6EMkapd?n0pogrH^_cz)nM z!hz*dV};tuX>=mT`9XzFS(jnRM(HjBqL#(^R^qt@t?ZFsV=|v@zekD7rv45+7_Ay0 zEcFg%mj)~v)BFZLLZ=yw#SGOOPQ1kU!$RKl8Lk$}xIe+D33G~nsP_RI>XCn+AS+b7 z2F6vv65?Y$u%cZ-mcPP>q!J}jA3SWtXPLswm|eTFn;} z-ClkolTVVJ0ESlW7CY|IvlK%%g@{&-CmAFf(?2xSayUNdY!(sDz1>wsgzBa30k8cD z?4T+$tT}*kS+z%BKa~~0F4e3&@ z9HcNNY{wA)Q5rpS2V!s06b63l9pF)Esj=T+e1%>Uflh-o48MJb!yWUYC&Dk7W)QD*ak)~BoKPL2&pgLARFN8SGfnB!7i47Xf9&{vsqFO%R99I`O1nWUxS zE#V|%h}VRb>`|BU28Np5D!abYx(i#2GpjtrIOZics$g5a@1D*+{0eHRJ6q@SHgP$m z;O$=YKPYEFo#jQO`cav#T-t(}_SyhHtX$uUsL6^|Q)G^DBYPcdxvb<0AIMjj=d2vF zJ4C(zonek1wd(=oLn76PR*&wLTR2B7 z7q?LstU)Iq#1%bjOas!K8Q82?DbqAJEYV+5rq~WKr7XHceOgLG`U}6$?;hM)x2FZ{ zqz%q}*j?nq%FT9D)>G-~EBRbo(RrzY*!gQrmQ762v)_(ubJvPp*Dn;_YpU5p<_P@G zAC#-}KB{V3@%6Ez)zT-7Zc~QD%+iSe>Um;Ew#3tx%;lx_#|P` zZgtsuZDB9*1|UY8vR{Fo-vA45o>$yK+>yWSU(pPaoPDU@MQu!&1_%!IQ?;7+q8BAS zI;T-gN!E4=T24BMk{FWvWE7CS`lUAJ%1j!!$;y^QXwt+QwXkqvJEH|2HL5W(&eH`v zAuUHqC6a#xl`y|=1BjVmtAL-hk`?6>-Orx@)g2*KInDF7UDgwx-4NI3tr{QS82q%& z3z=|0B|79+t=d0!mBxC$??UPx;-P9M>(Y?ryr$qbNxHF_cLV`DOuH7mfSLnuKei{xQ{ci;9i zH=6wh`!VPZ9!m*s{aVMS$W223=G88i)P#mhR<(p7fMS+5>~Knn6guk(UT2F&*v0GJ zivgfvEcUz2_6_L&U6NnkYxu4!Qf?Crwwg`i1=BPtjx-YwgPYpiOK&)?{bQhfm7QeKpl{g_LGDG^`A(VHk^QXr zf?zVTiQlgVW(|>M+k;mBK~|BPO#FVdLdzh^r!&Yj!?i+w52ixUhC(j0R{&FTsHb}h zQxQZDoq@=*l{}p@5Q%|to@h`GCcLM>G^2vY`DPNmueS<`IS&vrHVp6pf+xfEk*cnGX*ZbmRHF@yHzD1eCX)~Fj-&7 zmL%_`L)2yQj%$Qi+1RCLzio9*m|ArLgf;84-)bG-nwW1x)y76PF~{jB{8)7-2PmWt zpob&Ih=~)-Zp-8xF3T02OwzPC6Y72=yL%x(!_dP#SOAQ0tDg#gM)kjr2l$lE>yy-6 
zE#Ix1Ajw)UPGwhHseBRL>27KU5NqGf9YSK~y2H*gU5SL&0~1npe^?88ZSxs9484V% zV|0gHEr(@n>jYPoSjpaj9)5MdIBNrB4h1}yAHT7t%1`P3%J7+Rofy#RDI)`m9re(( z9>371c_^exxiVpGUnRuKmTW#t|K1-XX-$x-=tijxo@YiJ` zpdcBQoS+6^8y?+G=67M#gSO2I${&V{9A63xX_Q<3?m~!zQ;RvbU8;Lo9B$h(2Ac!u9A66vf3Q=7Q;b+q8fb}+F`KHnI2Um=inaeDX=Yw{Cj)b79&#u(z#7?L-ac>YA z$u3;E%)X*#Qt7HBej%RAI*$c6{`$FPlkyKL7W{5f>*>4ZFzVzaj#Aboc?qSuy}Al5 zFs3XZk-1>*mYF(_k@1?*ZL;<-;){3Zmv@icJ`IynafXR>Ep>p$)UI9M@wzd{@PX_l z2C5*d^AhH>jzEu?xF7S2LIF{M9~J@gOPG9m^oQvkWq|kHzHXo(`Mrii;$ zQ7Z%H=n@r+j$Vq$^1;)ujFb6o3jfwvmIl|v7!J9i1KbnMql4IRW>HVQTFMn^H5}5> z67mL-&-=vDgf?L-Jb@18r6G+!Au2xEYkhEbx6tw+ zs$=+0=CP`fJHczg|7guZ>ED~;J<cSB%!G&%YL=d8x(0Q zYqxI@zTbx;vqR0gjvJTHW5x-c&m;D~&P?fbQvn8PIR-N*huL2c-We|OualP?DD82v zBZ%tv=TrFq?O-f5rFBNT#ou>SL{0ij{@3ZQsmX1IN{cHjY<1LsRA?u|8nPW-QNHJK zeWFUk0>l|{w|#(F7GQJ;+WW;d&dZD_a_gj1WbHBdiR0gS2r-el&bB%Ov)}L#gKgnh z2F1!umm3xwxi;yoT6|>63^URAzJu3en6?<2Z{MtTH-3X>m1P0+_Tdz~>9vX@SFtUy zzw`UUHDBFukAepMDVkBdI3n_%)2$d4hrWQxcwoOl-6U+J+FaBqGri4dBe$i0}Hg*{?XD87aF1|KLv+bE>0U{rhhqiPHzH5#+Gw2OEzG{%ecdCssju>aH zxxPz%(7tKls9ad@9JH0WJnxU)Iew>4M#{qChnhj=$y?EH4cB5*p(heHWlQ+eTCWOi zE}J>`Bl@W0$hZj5$(JH^zL@(=C%nJZ5OdmW<7shNr>^Evq7~d)Co=7Gp!MFaR@mm+ ziW&u(MGh<^a98@ELksO;CI>XaOopfc^mkgMObm0=r8%)(Ym~2 ze}KG3yCO?+vR}rRp+)#$n%2a0N$R%7APq=k&FX|~MH?C^N78KFRdL0eLk;0>tx62@ zcIPhlGoodL2gJIi>1f7ScQk)1X5%r{H3^Jm(X6c+lPCNp$K#Z)OgYBF7ITXqqI@wF z9T|uxqNo&Kki$+r{4)?QZb~uUWD4)Ua|wR;3!&Jgx9o(?OOdr#Tyd#XW)p!5u#~Zx zb%*c#DG0dgpc&LYUpp_7%ygwjI<{1CDbSCw{4%eYIXq=2$q8b%u(`|{sl;>!9Fkp3Em$U>4V=a>$h~8o9iG}A(2853;P0x zX;|69Cv@^kHm00U^@C_0(bKP}u}ZZkY*ZEbQ9Izslf5;|7C7s!Uxt{X#l|y_{bAt< znPH2Km@^?m^4W4_7GB8ej$3}WqgEjta(xWgBIK>j_{(mwLfm`*AyR+; zKk<%!i9u$qO>S4TdV;Lty$$}Id1nL&BK`mCx?A^Q6j{$o2be}ji1{7mpQF&6IQ)gz z&0uOsTqC<{W^abP>~ZC@jTr=u#X2;rNm>eN6R#j=)$bD+i*v8^A&AOHDW9k?gk(6} zW=~tcWR}I+T5VVfA9H#fS{DSNX98_mIT#uLNK%Yvh{^7Bcb=PMd+v0o2p)#IUxFdQ z<3G4)Z}2bz#Bc8@2<39nTeXg$Avt$+MP2$^{>_%RP7Dc6;sHl7?hF6!SI!r4?$^SHd$ z5Em;`#ySTI%ewEUfW}Fz0#TM{I*JAPAH$fB0%;wG<&o*iKIlpxi&AGj7y&=xS}x>e 
z|E``S=an@Zaf`LaQj(@Qgbzj6RA(6*T%iO{=6)s!l=9#WLJn+LEpm)R)Ksea7tBQrwUrd1~?CyzO)i6Rpv#P-Y!%AtWupYvj;i0^g1(a;eEL^x73euVKOfU zcOZlMJ)8rVWBNqaM~Tfpa)r%q$?To5o%q*<_v#@H993pOpp&kwrB-%D=}t>hE}(5B z&8AnMInPEI&9O-EliAm$WZ>kdVFSQdtrq3JcZHOA?D(1S=jjNg!fPFjk*s^^ltl$S zBUvFUSuPs3d?Y(O25?@XkfcbjWUcEE2~jW_s$gV|P`K;jw>?gv*MXGLs}Cb6L-ric*j=|8?;` z+&Y89uC{f_Z@KM2XmQX`JdP>)cx#+Qr`_aod;A_r&72+PvLVFiK8cVOJap&Bs(9On zITPlJw3E%fXabz!m-QkBS_x7l5lo-(udy^>;ugG~*;VQXEnNpm&+mHeT#ejDATd0$ znuxwVy;eX(qeyTGkZ;=bQ|^5#usWaI(IydJVuedS`eW%aRt0`PLdpg%D425>wGHz@ z9`ghd74f<{)5I9jC=d)5AGZPt4@G$8rmkPvOLDU4ccMykXgMohuWMF7u8zOetXsaJ zI$O;0x5c(2ZJ)QoE^tjGh2!dt|7T>E3P+Gb@r;K5nnB-e=V0tY=ql6C58l?3Nd&9Z zTJ@!wUAVyBX+|3l=kj8D3;+%RKjQCdQJp?Uh)t;CQoE=^LSa0Zn`7dfY@gI1dap*C zR}^Yh`_|0EEo@e}+)T^HmG3GkxNy}*^Hi`5gq>x{XTjK@b+HN^EpGk!Pr5iMyx;L7 zjJW&L=^9PVNM+7B{~MUhGUu-02r&__T?+u{p!f8(rUf0 zT+VasC6LnZ3{?IoVn9+ zXRC7Oqy<|vbeKb97Ano(K_9pFF)ybe9-B-K^7pPc0%B0wBi)LQ7lV%7zpW4iEW5k6 zOvXYc@(>=lrp^jnAQGzz8@d(mS)*S%OcQJN_Vx46G|JK?SM56x;c%awnrbT!<-^IA zGydO5NO_=C++YQL_rL}rYSdr!4+|3=%5=;DV$1Mo(dQ=NIvuL~>h!J^p+G>WfCtiCH6H){l?7u}8#rk-8 z;k6dW{nzy_yw%g2($mvl8?3W$q{+uX*=Tj!nBP@y_hySt_-F05y*LeG@^H1(@(vo| zWGWY~sLD+zN`-f2q#mRVUt=U z(@S)gn{oMiZ7aobdf|_Rs;}3^SLxMNE+SM9-om1nqVgTzg&((ea|#$EqXQ#Y6LZ35 zlIjF3fBJNDHh_~3a5sJ3^8|$j*$lG1r~X96b;VLLYZ{=(SAA!gI-9IZ#)EX%M(@Vl zqGzL6jf@luQ=l`K#Y=(5g~`(7B~m29q^S;nIg@L1N~8sqs-C5CHF^6f(f&xWI`_KE z8_S~#28)eb{u}@JPRfphhn1m@|4R}CuJ^i^hI<_}s>`)Rgh$W`Sh6`e`(>YazEja; zn27ymRdfOTFws_FD00XZQNSGfx4zXy?as(*+e7a=|E@q~LkDsJ!iO`i`bWgju>MtZ zttojD95FhYMK!8lq?VlpK0*@6K+#UIW>Y%c%{sMB48aR5DiY?C6%MTMLAEU)#K{0a zNuvnR#Q#ZM+xoAo1|N&VCsp#_*wT5QQn-VE{h?<>3$hW46YhU&+9bnJqj0o)tDJ9j ztWR z%5K$)i$Sr4^6Upk?G4(nM1c4I3?{oJ%wC^-EXXreJR(=R0&lz>@I142C$2VPX`hbU zVw>XneGve~lu|61G(uii+DxGIY>1z3%h#!k6Aa=}Y`_fuu@<2YzdFLY948){`bRXi zTnAl9dI}q1GVWSOwa})eEh)lfQuyInb&7I<#gB>^#k#4?KCx-`k@l0Xz~;HbGQco^ z)ZUbj-}8m@!*p}c{qD5;Bklt)y;?$KqHn`mw1i^LhSYt)+Vl+t8+KSza1ewUBfggC 
zYmK5KG6DYUVtlv_j#SYDqd~pWNV?8aQUaf_i1Aji-n};&M7Ak@H-}E#4?7ES5F<8LZ3r+MwLcsQ~T@2#s z8!pFsDgQG;6mplXcU3Zf4bMaUEOaJYdq#OeQSG={NcIufq4b6I*)3U)6`i(8Nj+}s zYPs6%io9chmF3DcUYtB!5XW~*wnTG?R#P=_5QsKM#=5i_^rZDl4V>34M`rTohY~@= zbCYC6wnSIOP#W?|_3cY~&`5a(+3(tQqe{|q%k6FqaaM#|vjWGC@mn*0ifUzD548ZX zbSegbmzM^93%|=!*HrJ=J1W_BYArp=`kM!vecO%Jj9y!Kowoudh^JYExf!GPDxQ!Krc|??D1PY%z}><2fs3 zS(8p|!jCEr7bUxg+COEP)qqIPLOGf zBj2f~c9bpl^I4vC_dXxj{07ZBv|6&^uJytyH^a_|pvUT{wsQl&lHx>Ex4(W61AKKJ z*cUreNHg#&9nHnwci@PyAFR)Wt22Cp3mu1#B*_(9Gn*w+XUHxaB~qxcr`7#+oqbJ4 zu!(M-O3@W!`f|j>{Bm(z1dgrunYNczwTzEZq{y8KR-E~^qdeA4X z>qSK~03`7dw&(X75w>)+)KoyP@3%FUo($j@CxA?M#3;mpUCbNbITWk z8&L5KQ@AZ1I>c_nD62Ep5^}8zG^}Q#U2g&_(yrGPnvFq{KjdosOIgN(e>N4x%T%6R z1uISEAEhLwsf45tK?JzTp+@`g33N%$4+h(R_ROgzrFlNA3~G(7 zB-21{=500cB&w?Z$ymCC3VLYBvd!n*o&R9&z&ZUsuKZ{i%$?9nmCxm>YH9&1B<9Lq zSN}8X913=Md3cij0eE>io8-CS+!`7oS&l_=tRtD$(>NV^{L3(R1053rY06jhPPoy?B`{PDT{gNQleH;aVdNK%Hl9joK>Q$@6^n zPiuz!aZ@W>Q(`t1s|U)ku!Q;s^nWBIs;(b4v{Ku=0+u~3fIDXGB?zI?KW#a7T0ozv zzby{XVY-RrylyyJdM%?IutB^WtbkQ{92lnI)msDfvMP4qy!&P#HfZ{J!2a@iek$>m z*8t7Bd|%0u?g)KpyC}xKIFu6}n=l}7^H)P>#W*ool1JBb)bi7K8}hsRv=jxTExk3* zBEI@l{4d)hmp}FrcW&4!;48ERIbqreB^J-Po@K67tCSAo+*m)d3h@$&L(L;$-w z*y?mqH<=~xV*C`c38j?(uUzS1)vojS)93bJBGIqweN|)A_jVv=Y6sI^Fi$TV6XmjC+eHFN!rJboTGsP<)iR_5}&_1qbAA>HoS}T!N#5Yo67?> z4ZmfAqiw;?(|1b#-m8@>0(0F5i|ct`d5ga)Cw}hl*346|QyNb3)hXxnYfZXyxvvDS znfnu`)%t$3_zv(sx0|j6Jt5X1?u{+7Q2|gx)OTt_y)Q~<1AGq$b#BrP0Q%;a{}eqJ z{Tn;?-C_EO)}4fQ@WnIU! z`#Byje$hU*FS@&i9mp7a=sS`!H&nlp?CgO3-+Rm(HP{ygBS3$4Fb zcFg16tVG*6DWHA6ezuFDb$gH!ljjUEj+!`p+{IJuT^VN}Nb6?j)Wz8vt!e zm{^VviNzc++Fd2(GVS)pKHVt{!`o^#Dc9j}5E>P3U35DxmoFp*%22V5Z~NX{Ly>pw z5cn`+9LhB)^f}w*x{aP-aXkTL&~os3R)rIDtyXIrd30S7o#I7(hSyYICxRB8QTOFZ z82ukjjzf7ql`I5Kahh6UH7h5bI;c)^do`*dVZvdvM;{e<8~Ek2gRvES5=&K#OW5k! zu2W7ng3(~#?ljtuQZ-f$Kwd>MJvr9tj62i+OOulo%DR6OnbvOTxKY<~+RyE6@Y9JJ zPx_R!V#&wj)lel(B8&nQab@Y6<$AH^$7FTWAy*#)REHF#~@TQ1X>5e}KjxJW;z`5czLoFC%~6F1Tiv#?Uk3QjDTQ_Z|! 
zwFALbnz$!Uxa~9Q4kEq@il1eRm4ol5Kra4ECw_$3wfL_o}%ZF9uzrvon_^U|qz}I6m1PzGoPSPjn z+6<*DxcA@%1AFUZf=*37Uj0?i*q-d;ExW5uUO|$D5MA$wYji6L7t=d&g-*rn;DBGF zw<c-{O&)T83#&43*UDl}S=EyBFMM638s-*Se3#u2G)sRph6x{kX(7zKVx zI4yQMLhjER;!=OvaEYhyfREE$iE-$2#!R<`5WzBSF=dZXlU%wR*bg&!gI=$wUb#SY z?%WtzQu}1y=_hNGNv}KPk%m7TbBbGDdI%B6p29cgea+^auJ`R^M2=Gnwju zsK6wz$r{&a>vSF&#$8N6n~E1Gdc_yFQDxqsmH7?wtpq&p^G4$8W{U||nuM=m_5njc zc}4a*2KlT>G9mS0lT1P29r>Isf%g#fW32@w5w44MQ!ibDwgA7clo!ZDkS?3+93C3hR!ng|iJ;%i86)xB-$1%=UCBW-3 z$$V>*e)xK2j#%?mRGXZl0|p`SwNCD#-?~9NKu<6uzIG2IIOs{Z!$8S2G8Z7%@xW5L z*=*AYmT~zuZx|1WRD^b50_dzHAX;1J`i=zLti6!giH*%ZnUO~ddI(7>R)`>Q6zXJk za3)lDt*BX+{>Qz2QKZpuMJwLfkJ0uU5Ip>BJTc^Q2_e>%`+0Gw;+@+$m-{vJxE~kS zkvaL~s02Yu|8v?O4=a|tS(WwmKpmvDCDq=(+Tu=Dk&>x?C&~rwc>pdtZnapg073H- zWZgH2lGRnbV`l27<7i&_UcJ!aGJ1+G`Aq50!G%Qd<~P=A?bQZ$HuD9Je%GSl^jl~L ztVCiBsM3lVGYrgL3FqUTJ_>@|ill~UnJJOhMLJH4ik-mKuxP5UOkwm9!EWL0wzX;& zYQ}-|p3~*Q*<0S+ zibgBxyy72aJMydkbZmfEf3YB79V>IvM6}rZRy8`btM&kwK<<*V_@%13^>)9pY?z{s z7lrW%y9SXW4*UjSVH^$MpRZkIwuSECQ=XTwo*$z0%kPcNp%$I@TYJOS0hv+zydJCm zcxnE&>HS*xvHOKA&usI4ypZi{FL691cS-)dLQ4HPpDL3$!lQ>(+Z%pO~D40N^0p+VZ!ALn;~~MASKfT zO+C}7V=#FOyrql(M$V_zgfP5Us}n5{o@%cyHw!3sm30~05#A$EJZH{zvzs3yq0-0H zdZmD~MAkJR!}CKbbo?Ga1gqx8#{JQFMX$SjiclXQs~5NWq6@4&7Io`S^vEXc(Ew3;I98^|so4}z2r zSX$L(4ya;76d3%$lt1TA=yw63E??uYCmscD8O9UGHx04*9kP3B`^Kv&pRhPXe-#;b zp=Ggx7tZ9Tkm_{xV3;3EL2N)(uKGerJ3$JW%FIqKXknw1mz80((k*UXqxm2N74kc}b4 zG%De{PZUzn{Kr)WnxMG7kMjA2hRA-qt0Fxt88Z?q;1Y`&f`rCaf}=#E7|7BCMBpa{VB~e zDhyRq*f$G(rQXF}_ zRah{TQX1JE%b&26fwcn;(v@P%H^@ zul`OiZzKYZR0ta96U>acB+_1rK4{V_T9HegS(@$h+Yxg~sAe3aX{Se8f9tBxw5O&P zlFCa+{HdFVV{e*B+8SObnbU!7&^bzbGLhf6u*{!nR4^I`>;3|z7+ll^xd>s9Ui-H+ z)}lg#cIuQ~k`-2?5%P(Vw?K)nCY(BIffuwA`Yt!uSFT!hu#^B~ZZNgLvDfXXTLjsB z0{&NqI62bIMs5r6%B<9JFNJ3S5F4Kl>wZy&}lXA)-!#h~WW!^d; z^yDc2ah(sxHl~f6?~W2PMa?sg4ls$3btjIRWL-VD~iET z0sR+}966 zR?e8A`w`jBlC7iRlc@mr>o-_6>Q+G>&KFv7%x7v$9lq9X29j(xPg*I=r+8jnv11ZJ zG;y_G2twhdjCx4b9bnZ1s`(6o4OF^fR(C|mv`6Lgdbxj)%cWfwjM`|`cO?jvTX6US 
z{9zV=IfH1mB(?_C8)5}x%B=ANF!Z*$K{)bfT;SAA2Tq~pQb!kozcZoNfKKJn8V&7^ zdqs-Sno#BrstqGtcIyFiA#6%vJfl6`Ah6~qxPrM_i=9KwJr$g#sM(`8kwQcO8t?EUNwye^@NFil|db#9f)|lYtdy_EhPfTH|C%$;;0u~O20~hvl;iF&V zWGSwC#c(fAi)@h69_d{X z+;&%zKFq+0!9F5D?MGYAi?ZKW1r7nGV_NZcnxflfQs(t@8@TG&20`tc)>Bj{~5$3-IyhNsi zCum}GpH$)jCnD-@2LBAgnH~kmxE3xobuc4Ya3O4fQ#rhrgM+!Lw=8dG9k*`jXh> z!Yf~`GqIK+_8DBa&ajpSWfAVMBZJ&0W&GO(dGf1_;TQT)OCmSi;g%R4<~L<=?7oR8 zs91e4$9s~{dQ7IZj&SXu59jybSY9k5{trXH)POcTkM%s^MUXKI>k}vCx!XV`Egtj8v);Lm&n9ySN%15&;1pU*B@-fq)|%?Ek8lC3JP0t< zNX;SG>7AWs^U9^qr|1=HO!^ygo5mFwiu7_kmSBRUR;gLh4`QTDh>pzg!cFi`A`z~< zc=OHhJ`|ge-ZwY>x-Yb)&v%Zb6x{yXe!U8vR03cWm&E4!!8RPYgT z1NY<@nA9>ZaflliFS_&~!10Q^=uSGR{1ueplBrl*`|VI-XiVvfHX5 z$)iQhG8$bVix^`6p-Ww9O9%MH@%Cq$_%+*pEskC7OXmgu%w(H8!&uJFlaf!M?D)3y zUnz})i<>y#snq~7Z1svaQZ@?$@h&Rl73H^*aH5~Xi6-AaD_=u$yXj^HJYOBTTs-pZ+wC*} zVK)}345qtc*Sni`Rldr%Tq(DUn}{B&^;@kMv~z<6Am2xG=1eCb1Ogv0M&0bj5WTq- z2yi0Cgrh`ma5GXCCo5tld99$C`wD$OpHy%HzvL3^IA5ABty(K!GrsrH?SbP3$o7d@ zhJqZSKg1C$u2|ATuU0_6Hd*8R^l_)i2)g4K!(LU%x^SsJRT{OKkz@K`Zd*fF*?pb3 z;eJecW3;hcyoxsLaz)L~#ih--$u4%WCpJf4CcJP;W__i)iy&sHEKx**;=W|i$KL#R zw*BzKt*Wyuy)ka7M_-Y+u2zpn?eZB|A zOf@Ez1@neu!E11pu}>yT?u=_VcVBB z-16oSV)oA&zf$bGgz)SQ#Rh00)ds+7S+2)O3bsedH?gZaC$cDFO~$KGZK_31|+X6Jv2zo|NCead+)gU*xAV z5PA}DNcSuf^`t&BnpD>K6rb6@K4XkEsy936`#;VyFk|(UD+AD-yc~(LS%Nlf5=e|K zqNCLN?4mB(%a2>9QmJs8HP?E2R}OM^FIm7H1AIbw6=fD`Sp7tBzYq(Bk>bD&w%B$C~kk^oXC!G_{4Am6@$!N^c zUsnRc)pQQW3_p_9JWZ3o348ewP(3>oov1fJVRm#RUY~OeBO8*O@vxuL18WD%S*heW zf}{N8CIYGs2qoq-5vh6D5Z1&Ti;^g2=7U^~W8bAQodaXVm%W5KGMwq*z3u5A?JHsY zQKd>txyzmvPQJubBw-WJf1yZCombb@Qj#pOr3=C;d*u>1)QvEJ*yw+Y@M7WmiCi9L zwIXw}r#klQ04sSxqZvxY)Uj|}kj0G<3?ndA00L}d24i<(5 z7aKApjSm=x#fiv6h?p78oc#ejBuyz5`^%eF*cZ87fYaPaA}jpox_hghKhsNeG6!C@ zGP~62T<59y(AekJO{;&_f%knEVGc)>FjT!^TenJ6KI@4cUPT0z?GT*mBFwTSL>%k_sZzN{bwj z_-z7}-152dmIbSWVQuZ;v!wFWqfP90PEhkimn?^G>iBhg_cj^gcfg9Wce-S1)cs+8 zM*H~D1f~SCX6q2F%HQL*pQ#(%bQex0P4~uclZ`G}=ZJWUP6ux}b%N<`m`>U+w^pOB zwXlg13u}bJJxT(WgEgx!T3;bq6uZuohvXY(r3;POp2@^0f?L}byeGiNx4l%DwyD(v 
z(0yqZYIGQW$Hh*|gEl^v?FCMPXnW+`ePiQBL;TT1wxw&jHp!u_=EIfjjBdF5Z8l<; z@Hfz{Asx}%u3{ip8HN!m(L4zTb{WP6qCvWqQ2rVzuZEIsV&L()qQVIMkAZ=BL`$L% zHX{=^V}f*j133S9;E%m|D^ zJ`w>;{F>$Hv0)C-GAsAf?{Pg$-k>Z~I46%b&6*tZASdefT!bhGcK;Yufo5@!&6 z<}v{P1O|II1^Y+>>mmdDC=2_z80#Vrds(S8#XLC2d~dkdhfSn|P2`O&h>tzUhh5|^ zaU$XcN5x%&;=^%c<$Ux&3Hm~McM6hi^l=99;sa3>5Iw}~U5DO#bIJ}^j~q(Valr}b z5%e%;yjPh1f08C=11HbRbj)$bZDP)WfThmR?p^ErIK9?gM5~^qh6)c={y+*zXkx$p zDM_XY<~1E{tP`aR)?`l1`dPe1G-O zknt-9h8%3nc0{OdO<=~jrPp_=9-hHJG(uMCwS;+rawX&FzK>i#JBycZ8Df9v_z){9 zVxX#a?2h(pf^?NTCHEk79p*QIhS9p!di>!oC7U1Opd(ir<1|+%@JO{yn1q`Ic_b?( z)yk^ZPtyIkvC-}G^(z6BFy91Tfs8%+#ZxPqcZ_B9xPY_Bu&0r#CD3JbO8>J+zIeT# zXOX5F-Y|cZswvubkmL2HflW2Y+IA3zt5S`IO*QDur8k-MK8Z3GI!|ppm6{S-8R#=o zVnVUI;@EZ9xcBJncCDsI&6dj!i|mgNiNh~ae?=@=#Ut*c@}R; zVX)alw6u8Cv}~v=lbb4IClxTC0aYZSJn-y&VYnhF=i&pJ;QNSA`@*F?m(z={)0+5&S{by3fw4447^PFiQS=cTnTx_`F5KZamcm){oOgjP^j66WU}Gb)3^vx#$8Xe0o6Ma&etB z+0FZR)hl>r?mWWR&>t62J%?}&v(I@2UiqWjsva|~NSO6u$j^$n0=FhFY{r zB^}fm^k{Bd--e?{K|JqoFp@>eeVz21 zuaQ|te=D6P%QKG#@PEc4m&%^48MgJZ4~>Un_HWw`&9;wpL>)znTT4A=QpEGc+A*UJ z2ABr)o`44QAB+>62J~2OeiB6n^hEI|@uF;6ow>x ziJ6?~VWCkD1drvi57QELuGI}i>|Q^$=%UG3;>b0%x=lVPmdSDm~mUUGF_V^Y(=c;hMvyr8e~6Hhktu{z5zZz}3 zT#x{6yHniK*|0H%6qNC`S;W3{ssG*fa}|Jr6bqFm8(%iEayW$r*!g;@Jk^WHYdUpLFm}>Txk;>)wxAixr2_1&$nz<%h}^&8K#wk zI_G8l&ynUz&l!gI1pe9fS1FrN_v#^tIAZ!h@eVVCLV?9W#2}8V+P!Ik9@eyKF9p~u z1;~}+rRC1^C#smV?E&ncyAbC|=H+(4RPJkupxY1p-j07|zUDuIa%j^Y1S@S0U?!tu zg#|OIBiK;WTODCkmSroRdFc+6{z$K2&Sd%Yg`|ZY0>mFj7NaFu#J2z}P#eG}SnT`Q z5tpYDNpKH1A|JEVpxRPOJR=snAx@bY>$tivU1gBOxj0g@x}t}Ty|{_}Ln2yWn)rkB zqVxb9f@UA-@%uTk*YJUm)>dw7K1aeXdpNb59`FiKE~BI-xR|YAtxw zA<5GB*=xgcHF*f?+;c2RtT8x03*$Vd@Htc!zdaUlpCXHtOF|T9E3}9v2OrCSvb@4K zLCTd>HfcCU*oJqx@fnj$)BmCO-{#IYM|?OPV2r{^9?1Dv$@Obgpe+<{waS{l#WvQS z6%+a{I3ju$&`FIoFxE)s>60ou47RyAvHBu{hwYmeWu#Mu-~UT(qe2{dV8YFx9IVmB z*dn)XYfE`sj^raARbop0w{xaSH3&Tqo2@FA@_AQ>XHDruK7YI~SY^*I;Z}d1PjshA z*LNgkGw6^7KLE-T2L}kdUIP&BPtG*HZ()koBcewQLN`bWf9gs#KgtahDuHO^yX~G* 
z*m9}7kDSE$KCIgm_7n)OfZ*tD2W_Z9^#DR(7?&ARK>A=X0;S1qK~%r>Y!UAt9%5P= zY+C#646pnH1RQou?h*p|0{N+?NH?;BWrD?JQ;ffa*MWtIS$GtyQV+9~tvTeBJ@eo= zmWat`-A!Rn^w5FW?%#M&=l_><2I_o|X=b`uL{f;$54{Fy9}@>yE^YKdTD8Jl185p% z)q-qayi%j2L@OF%0cnjSEWy+u#ck0Q&#F*c)!$`a#Qv^OfmG_5Mvho5lB&|n>Vvs- zdQX&cRKK21qb~W|Kqrl13+XmTTd!TBW-yVmU7&C67uA)hc$W&do6{uUfOAl)aYSAe zSFy6xjl0|#>mo`5$nWo07&#>c-H2(fhCF zR(SaUS-}nSwC217bKkk?|1ab|lAqd)duRb1Ay)=scRDt0LX7FSMiibyev1&VN1#>0 zL<@Ir=JY_8$38!*80_>R-wm#5N>&7+z(7w$%Z6e!CQ{>E8A8KjJ9AqkD&!J(UO37a zTJTAOt%$q|^INW+X4`)g(csT^nLq^(BRs=uLU7cGQR!9b(nA?5TSPB#L`B;WzF_9< zuS)Su%C+iayv;@LC;~}#)e?Qp0UB4{E{_b??%ibVm=G8C)Uo~__P&!3M8T;E+jxVvh3+68^ByF!AGl zbBP7!G+4Jp4uL5z0mg-iv%IE2FQ@^ALZzYD84gp>piVWQ{{LM%W1}eG@RDq@r&~QH z@hNmP_&a96A1Yey*T3Io5zk47BW^LwB?2L>IW2RgjFgIhJSMB`>0|Nj)~Hiqv7W#T zN62+ab`9jReus$4!G1bB!=MZFR0e@xwF37KxiUByY`^!&W8g_AtqT|jM+WzaEqV;? zbz+x+%oB}>ElL&s*aOMQ)kUY@$9=m80>ZA|Sbz)rtl|Wt;qrZ`<+0Go>;J(hQ3xOPx@QHp@9AdepyK!bb9Y zGI6RssB)imNv5QA88DcK0cneh!bta01Qy%>f5+~(pQ!}zjJAca-t42LELQB=Y0;A< zUfeYutZY*)30I|A=ClwTb*(kG#GM#~o4905x7e@`Wq_zNWG!)nN{`CRxB^3u9~PcJ zDqjbzL>LsEV;t~Nm>1J8ybdoHUr>SB8?Lt7KM$QwNxJw{157Gv5*N8*%Pgre70TGt z&_9`C#1tAi;?$VX>WEKL?x*fz&DZ6hqXWB(@ShZ*4@b+7#mf8*g2r-4#h3r3QI2q< zE4O70>DJLs=~(fbz9lj|Qe3iNiL+pftYxL2*vFo6)nXo#qmyz}H~H1zZ}qrw_DRz|AKdJb<7405;tebzUmeDp z`&OHJX7hACST12NA;sj>E$e)k!yl%rfe$ICl%)azzXn>0lLX^(3u2FMP{t+%*s(H% z0G8&i^}a$}aF?T>l>|GeVB^|6U28c4IpAMG$BBh+7m|G^oNfB82E%b2~Q>#I* zep!dBGZo}FEp3N+W|+sf=t%gLbJR}&NR)x>Jy7N!e_}u3^P1{-6yc}Vb-y&wT5#db zxq0qM`LD*Q0_e^GY)11RySJ>=u2o_6MCib*^8?L2j^`(dtuEtce z5k5kNqP3=fJfTb1Jx%gw*p`oyUP1mF(VyCt=$e~;`*1a*`A!61WG6EA;6lQdM)#i7 zDcp~QgskfdY4)MOR`z8|zT%47$0){=uaJMW$6&tY?+(M%S>3-SsCoSxmDJ?&RPVtm zpwfX^Kw+$KB?0>kcZPZcFvaBDaJk;KPH{jZc?1pBq}#R1DZy}@9}a1e#R?2>g4eSw z5W%1x3k<8n)3d1gi9uGWdxSyRR1{JXTg1M<55(0}V+LHm3O&n`iwolnPfmD_-uaUV zh(^F-$8dr|!If`yFP!DHlv2CjU8ivyVFrH$<+1A1x*J@BHsfav#<9f^Q8NIlK}K)s zQ~1wIimvWlq6r@J@025~oec%Z67}AL_uy8Ac5ZKPSjDrde0Gpn(EJ2e%Qa^P`(f*B 
zCt$(2#JgX4$+z%!5~e;s*N*83Q$Vu$Dn9ND{KM*NvH62Cad7w6^{DEH;q=QngJjMw zeg*w>zoZjTbt#@HsI;2)~0wj?%!gPV|2;olYtlGUiu1mYf@ zE*cY7Pc)^>_prIlyqnv_7!t9nv1GR!lrQ^X_{kQs`(2lIW4}m#X;7=?kC0g-y&R=V zUy|01Vhf_Y;aE9H>+jPl^tIoXIHoDXaQs)>AwAwb!Mq1~n?_;qs zZ!)rUAGm#c5F=m&sZAGZ6(s!nWL zq!~C)4{hZW5Dzb|k*HRwBI5T2pfv@|TQ$wtIv{h9L;mwEW<;8^F6wvgdty>Jc?^*qSC$FNu61j8W=F%&GwTpP`+bgE>)_K z&|ke=V)!Kyg;T-|Se5 zD9lLxgpwjs(m)6Pk^>@B{us1-tKXzr*j;iZx!I|+Oc!ycmY>3@UwSXygFzIXIXit1r z;FDOqiRWZpZ)8|IUUScWe>Hef)+RgNEX^ZXkwIK{w^~zk4v`A2vc?~Z!fMchEo3DU zxSUdym?4X-KxSuQhUB);?=VD(nW!^e<~lsf?5O9cFmb7C{?1TYkU|M5F0vV{NZ=nR zKZO!-T;wsJ0zOU7CWeZI z`2B=&Y|`ND>?|}p9Z&DM$mlAQqkdsvbvbb$4RAol}UQbLn4S1n} z>vB*~akx}vw5tLHJu)au`1hvg{n{iekYxdAJ{KUnc6W*v@XdeU4cNzL;Db*bwFAyd ze$yF;k2{%hD3|_d&JdIIcnceihvw8eFk9(i;3&!}L`gXm7zJ9OyQ1uLkxGlxWr?zJTD#@I<=%Mu+0^5wt7T%HKit-D#I@^I%dW`tZ>OI&kRwgdZaa)p1L1f$EoGVAeZ1W z8K_dXOp&t^Db$ZR{lpzNh33qIrsAo=|0_Cz-RwMyR8CI(`tZJm!AOVV zPuSMFSEuBB=$B#DMSq?dgkEqUvCeq&f)z)0ovatzPitD}(pZ0m>JcoWn>IIJg__I?CSf!Y(1y zc$vneSd@5j;Ol(E+`-0sHnCcN+D3BlVPpDF^uTY3l1CpN!Dbh z#j=RF1y)AVg$BHcnC&Ccpe4g;iwaRj;>8GBCiadrZ(k`U@i1V&qFIvl40^ne4YY_j zz1A^nqrZnh83!RqzQ7d&(TE0IDuc*Fo&Y8i=mt^1hn$^=D@2Y|Dg!h#iZy(%A$=*% zMV~yPMD~ncl0%A3s!3E>96SrDvYYW{rYntpkGJ1;;1uJB6(JtXs(I8ySBpbN(iaXa% zApSl7MourWt2ep@GbPS)#p!Jl2&!aXm$U0jSz^HJ>3hqmD$~q^vOvOvqEMIP<(9^* zDg*TW?C)|xSxBbPg~uU*Je_&IYsj%eG|yz}ZrY~6wi3JW`BsFWx78rA%l!XpjzW*; zh)r8{{vjS+&Zmkumn%LkCT#>|&ssuycM(%O^%@Qij|JeR|Ad-0l)e6uywMcy7K1AE z;UYKWwgEWoRS6l3%4`B`OSGQkZZyjeah9%oc}%aTAo*DFmaS9ohpb$`0?nIi({kE@ zdLJ-!|1y`DOazdth5#@RnMzHXxN3ly9!spe!|p9)vnDuU;y)5i4)FcXNjOw1M~hx- zHwaTiM*Mj5e*YDQD#dL&=Dz&WdU6;wuC|d1Y1j5L_ALZ#7L&waJ3nybEK-!Qms?;U zB7M}i26^w7-x5+~mfB>=kB!dqT#-m{JL6J%1-74C3-WyjxSz|5js7h=i|uLx4Crct z^ljx`O*%Bd!=G7PO`syVrD37<&9UFdFux9GouA5~sxeWw3Ju+09yP@{yN?-+{xM3o z{~D!Jp~Lgfjsx+E$a4y#{AIBQ3Kv*&>>V42Sdp(BVC^jue{?1=$u$|_MHLeZQ3*wyO(OkG>nZwJ_3>O!JkDl{SPfwEj=J$gw zE0-M}eG=zVCq&72PaH1x_{~H{{C>#{W_bO{HS(cb5K|IAxnYbxY9+ccG@GrC+AdRu;v-;IZ?> 
zwT{rlz{EooFq>mWY4%oGLJE?1sp(4=f%a_>RQWtYQ=qu88DR3fKpm#gKfz28EwTRa z5tE-K6K&eWIWn@ASTJXc&;9wm^4!mm+e&^gdICout2w#6UUIw=S@GXNpQ8u@<#(c8 zIK0b^pP-#rfXeuXcx{)z#;^%%@)!w6iNLG{MW$h4Z738aekNx@eb9jZs!7Vt+c*cv zBQwtIG-OdWB<<+tv)6C!lFZVt2`&}C=^VF`KL1F0DZ%`R&RWW;=A_eX`Hwr#oY$jt zUYUOs$XvITn10yr+9$9NJx}wi_J#Of z|8wSYZj&t+#rJ6mS`1$zq@Bct%Ms@9rp1Q&_i4U7Crshtu-S9wUH{C}!evZ!@2nH1 z=@brq77H2z-F0flTedXrf;fJ1>8->o!UG#i8T(u``~`Y|K4q2WXk|3~l- z$6kUw&2w0Ax*WTX<7BV|=5nY8$;~=pu?XPZOBIiUJdk2}tEWqGLExLtP$pl!#7{@( z%9k|Al>Vn8J^VIIF*WLxFsAb)NbL5z(kG^JZgmC?*#$Ogy_#oWqwHUnt*mUa1MyCJ z=#^z%oAMsZv(*UvFFDa-tFRhA-!kxdb^67XK2`_nchZ%ANOw2lExJAq&&Nc$AGH(C zm#+D~&oKIyG*l&PvH)@0u5Nz)IihYKQN+`Mo@Jend&yMVQEjbr+$3mGII<+{G&<%u ziKCTKG+Wd#XWFxMPnM|tR`>J%ipJr;={q5jbiHEZX5Lu;OaIvON-=ArJxcLI&)E@M zr9HpHj3(G~BqTtcoM^bX`)b)Bq0TB&%{N(&@B@KmH`)n&!<&Z>9}dQ*Z@44>44LQmhkJTsK z{&W^Fbd4DK(W@d)M^t!vmIm5%;hjo(n!WBScR!1>!vz?YK2-L`U5RhEAVKB}2kJSi zR0Q^t<@_yi_div?3}%S<8w+pyNydm~t1Y*!75pGoTR0^liQ2gmdh z5|gU8Ie;z7Ko%;0Ix2^rko``c9lrIT7GZ2x=S*8yIVD>s> z5!oW{bfTDhhf2N3KF7fClg{(s?MU&aeBgEhF~tEa=YOjBHb~qz3vXKqsp8A`iS2IH z>{?(R_9`f4=qt)q)Yk@$67H!ql&gC8pw;O;YjNz^N z<%=XH5e~_6rHC^-@~^Xh%Rq6U?>*Xr9%Pig5mxUDIJMj?3BSi3(7yyXqR z{i`|J@yUq#H5 z{7=TjLu9!khIY{NNyD!JTE8jD2dfz-%k6V^aW=buyP!fbjO(g3w-W+*~N6A9}eWdy_5FBEB zj>Ki#>fX;&hiRwSH6odO_*X> zN4j7u6u1(APZRfG?J5XmO&RAZxb+|$5|$;+)LBGG(3a6k!gX;d=yI0io)O7yry5K% z4XIE}Gf8ti0}4QPi(T?Jd4HOc#qm33a`A6QW^LHJVWr$`-=guydTZGF8NggQHR|Xa z7mwJ|xA?kiYw;K{u~K`>Im3Y&JUr09~6gS1)vWKUW=&Rmso&u9bbF*Li zAT+oQ-C_gv8|eL#+NAYbj?IzqGZH2iLd3#{C^srb`C?{=ew&v&*1HN{-T5l|VT_?M zrU2Hcuzh4M(BCo|>Bk=8>La_r&8kVy3IeLa@dUk(R$QUGPI5^WbLwFnjJ1`5uRE!j zn}5}_CCWAEd5Z4TzvD7wFuiEQbsLq&v?-Vs2JD&Rws8uB9|xsdvsnFQwXpD_^Kn}0 zxrdf~Cqpa2ET1Ir1Iq8~jTC!NMXEgz4lRy*hOhQeY}@R;&!^1aH8@to7b?|#4VmgG zC4Hs0wTN@~Rin<*9?(A7u_#Waq+ygtow|#HDja5H2PAK)GKJGl6LXId8s1_|efK!Q zrRh`dAtcZQ+Z9~PyhXO_r1)YKxX8lIo=6XfW4)??2-#S1;f1Bb_qYt$!BT%$t<2E< zQmd`cql?WQdW$*h+5gGA84NYY8*A$ZMA*4@#V_PXSr5}B3Y^nSsE&L6RfQlut%kEm4kTedcg-F*+l)6@E{ZKOeU+h 
zjPTMy?S)~U$8nS7@wlj7r+fm^p%sN{kUF@*=o?vff1V{zlR6wGGct{(5 zz0mqj%dxGT5(>*>Lb-R1*Yc^rKu$7yf=6(DF;4Kn&w_ErK$;Ti$NH6T+Dwy;Ser{$ z;0_O1cZBMWP&LOO_k-Cu%B%vV!gcM>pt}`$V^9J+mD1M+%#M)@Ece$BYQiAXo@ zKA))k7DUkDmE{<^9J*UL@X=9!V|ZS97s?Ii?O~7VN5LM|3WQ+tjd_9Z+CEg!h^?Kp zJ9_dBuF(S@4Au+}zp|vct6ymcBc%!_Hn)S%2B86j_p5TEJ6aUP?vCGqDP+s!wp!Ij z3&F~zVw7g#K5vJYBZPv$0?m-FMv7{mqN)=b@kK4fCxwnuTe2SEhq=oAG#cegVPG&U z>PS#U#4lPjW9{KkREMDz{zw9nDBr)eQLM-O&`2e)-;b0aO|?;~l6Y8$p}m7R8b{42{<8*P zB=N*wif^pQFuUTxv1tOI*2M-@c#y|v^QQ#Rq7f;WC4#yXd&;L&?f>v^&Mc@m+m9C!}o4oB^ zlO8Tt7(|aCKr^&_gZsf%ZUR2d^4kw#kB7x^v`6O5e5s?t`iy(b#v~Hk&e5$o0pJ%4 zJ3gHC&F)7Pm>KUzJJ)2bVJ8S>)gu(p4}r{Uh$V@1k`Z$T+!B{_y*cSQbVr<%>(K$$ zb5ncE;ModNm6hy{m_=X$H1DvYP&J1^hLfkKaSTsMi95uaj-mgj<3SM{Jo zj?>z9pR+i-=aVOzZ9r9dAm;lUkdANFQp4h(-ZbW8*xV0rsT%YgMDg?AS!%~|N2T|O z+A7+nOw7lQbG6X+-JTGvJX@wf(OcpsYBx(Ht&5=4X;yTr^?vuUj~g6+JW~!H-t>Sb zxhJg(R#d&Q8CaZ_qytIn9l15*-#Dc`{miMqP>v1ZUrNw6wKaAz%D~Vhwnq(y_DUV^ zo&_!Hq%Dinp-G?h=vA+f$)?8Ig{3JxX}=H=%&ag!5f2xPfai_9P*^D>FMp&oHYuQy zn8pPJuNCEiZej>RR}feYGfL$W9!XSuHN+2e1~@1w%&&2SY{(9s#F2OeDXf2F*^trD zCx!}c7CW`CG>OXqLLGx38sRPG3Ex=iDqslbgchJ_H`1Ak&z6U{(eCDkokSsdb#eFL zpfPh5i$w&0Eoreu7yob@|LL(DRG_@cW=w9N4I}tR3n`hOo zsA!tjg^z|t5+)1PVlnZk;NH_X`L-8joJ0zE+8vh2ZDqFe6;>dLrs|ky=DQypWc)g)x5nC_XUS?5>#pccC6YOWG$F5qkU147qq$R{}Y$wv^OsY%u}+ zdUH3iI3FDp0B5*_)8G2d4JdK|FPX~qqa*g3FHA30nv)~jUY4!88KJJ5SDZ~~TbB`i z{Lx|WBMHCjFI;(E({WT4QWP6P>86+%)}ChY(j+ZB!6-o7^l7LY(?uiPNuZf{KAAJm zHn-}eN|DWPArbMy?aw1QmnWr{2gVlbbucmpru8Mw=L40doN#&0uHIb6nK4K?k*D+p zP^p)@dtf*UkZK>%BlHdoR+A=0mv&mZR0QNexFVi)hE#C$^9`VlfGVXJw*v!@a}WEg zJ)@?GY`SVeVm?y#K6-*5_vwNh0eI!oqO2VB`OGJgo7!I z*-lMU=VDn_jkU(h4lEsJc%%v=9lp9Dwg_QoNi0-(`%S@_28@?TrgL+1!$4np63{BP z=#yUEMIy?lfA)w(P1i=|oKsUf$f3dW(2XmPU{3u=1)>kWj+xG46~Jez09~ERMHiA_ zAw{@yceN|ylJ(C;hHgzl6qr>}?RfrKDDx>QHXRFl3JR#S_S~%Th;{7khCxvwe1T>K zP0ITsZGF@c!T`e=RDFSpf%=?f3*k-LrdC@ATb=C~1TACrdx`coy<*!^#+4?n9$wx6 zVCh}N93?&_p+=b6^a1NYA)!A0gzog|(aB4AkSz#^gK6@&+>4$;d~zz~-tS3uqDaa; 
z`LB4x_ymt?hyB@XaadNejuSnHmDk5(ine=oE(ffrOnkCsJeqktifP>-%HRad?7+gL zV)>?Yr_t2Scj8I*oW+f^GPxW_C?L_=SJQ17AyY$~WYXnI4Rhkbv&ArQqr*-_ob9Mu zUer;Iu6?M0&{n&7TU0Pjiq;hyEv7(+-!V5E0-P2G1g#odA5hNOJ_YbjPehW~j#BWj z;IzzguIs+G_bOs|IgH)|B_DU}T}?lr4=kAFCDu6GvdLkjI}5+(H#_Oom`uX$x_C%V zrjU{~a(vldMuypi@;kmrd+Z-A&P6+NPQHzl_ACD0xtorrps+@G!#cT3>_-6nzOc9& zA-n0wg`fb7P{+glw0$MgUo7%-rn7d9iPEbIF8NgLF)F$%@Q{EDdR zAF(Awg~(N+vWRjD#e=%Z^zqFs4m=&xM4I~r}E zycmz&b8XlLB@gPv-`jwJM2ylhX}@cl9J7J$6UK{)gKaFK;^|BYZ5-Sjc(C)WxZhZ+ zx%vt}s3IYb8ePYCX2jMM0@wgXWY`2bRnhvp&05JORJ@JOs34<#xvC-3#DrU7V5R&| z(OATIl6Be5qU5JBN}aUMvHO)U>{|%V!o(qs_3nm!>yN&{+@NX-Dv3k;1AWgC&w$un zkGSK?^Pn>JS`e%H8Eun?J0T?jj)Gi^#N{-c;TjCpP8^xe*uEaio-_n+=n97dkqney zDSBKuN!LdFgIy)t6+%B2!eT^&!ysvUCYg9cUyPlARq^#+Bp-Vt40iJdoWs) z43;h6Wj$0jNcLOKFdQVUO`cFmKtoE~XAePozDgw%3(d5vkj@MCM^qHq;hmfI;>R|n z33l(Bgr#s+><5L4Z5~5zu%D^TGeeNW1<8JLjTxvHbf?{!QX9)}K|*E+gm~h=Jm&3S zfb{yc)y~N~zjzBXDO6YPbRbqH?%Pl0&=54rPaRCE4?0EkqkGK|9mocX&I6Cf{dO2- z34c>V&_q@&=-th?lO;tv;dpjY%ItAY=j0XaNJC;XOQ5t&-E>`S>t$adQ{bkDsio?Q zQ>0M9f>gi~{-HP=A1v#b5%5f=D2$ZHZH?zf%6r>Zjf_pODmASXnaL29WMXcxf#>G! 
zx3tuhDJ<8#k+Id3(cLUr4w!&Nv{mRqTx%4#u-)uJ4iOrkLhAvX?c8@A?*Y)D>piH@ zc7=E1qXgxGyhcS1Zg0_`j4%gz?T2mZpxi7lS_s?Swy*cR$@*xs*ckb}K^U2Pqd2NuUJZPrkzyu9# zUQY&L5axf5vZRunLJ_ zV}<&)VZivu5zv7WIp@8&AA=}AADQ2>gPOx`ce)FaKJbpM!@u6E%dsgGyf09{YY3tB zkwAon*#VjLzD?1F%PD#@%P?o4&v@f}zIuobf{lq$ubr~S3p5p(MtXS(n`6@rKYV}6 zlW8uxmzS4|zr5@}79YybjA^vAx;t}h$z*EC}f<1+?||1-ph|@*t_z3V7~kpbD%5Jts!UayqJD&4rcuv zHoZ5?Zn2-}v$Wl0j%$9%3jG{@+w!hQb-d{?j-EC(U8yq8Mn0CKGq-$edKN`B0_v|2 z{95>Gh<+G%_5Ag8&Gjq2NY-vNcgfm-V&cW3yYT@*0Z)1Gvp2tV6Z`8VH=?D@hnNc@#{>{ z8QH(H@DoWL&w3y_bEvqRN-5#&^4x)}ePey=ArrG`?ZAH`b_bv0^XjX;uJ%_2+ilqx z=)hSs_s7G-n;p)P2CME)TTmLNoO!dGx6$U@hdJc3Ffo9Fm@D2 z{}j0z5R~}LvzLgFomtWO0{ZJZc(SX^S=)$?SFRuLiGVeKI@(DJ*176-w9K|X@PG%m^cKRJMoLq*5`Cg5p z_KS-dMh}bcfE_!${1jY6*t?G_V(ih)Z%rs{GufaUqg5@I$nhy2!m3TUJr2UjipJu( z|73RI-ieJ0Q^wK42=MJ3o{XGa6)c*ogVj>}UB;2QpJ`SLcCAlD;G-5XHXrP3C+OsMW= zoPf~cq49@^vxwE3YXsVIo2IwI9EKY5+3`ta3wQqp`_H9)aa(M`!)mThjH0U-RI@$1 z2p0M{3VIJh;yZ>pPJ{;)+C0wFdrFjo-qaW`RmNW)0cIOZJPG?eC3ay0BNUuI4q+V# zrq}Vsy@Xn<3W-Er_QFLc0E8+AX(ImF9MHsclNc%pM*0hrSaf!fe$VvCG2U)JOi^D* z=C(aPTb69XF&nSvUb7R-y z=(hz;IDi8J7xSQN);p)daPJpdubNHPnW#jMqIf(xBF^&qk?CciN}joXb5P{6dW48w!YmU*W44gpD| zlM76l;1h^YBg6Nns8)vW{IEs_5JfCRWAaFo6mMSVYIib=m%7#dW+rv#`D2|O&g;4> zF8!_4-SV0IcEMujx4IY8^3}KV$X50aoWaGM_K!Cgll$k~-v%uoGp{zg-zjC|fqI76 z=N0o0@DPCMu|w7PDy_8xNn@_^3V?|ANIs?yqXXNrp=-kIrXR~EO9}vQ36B7rXffZ( zjS%DKozOf*n%3-nSdlVgXOShkWVMN#;NPxcFDH@kLL|d-q>wqp#LHJ&PY|ol@V*JC zi3ygVNIGcKgbqo`NEWeEoyNPsSF${ItGe90T{KpxCSylt=hqI6s?duuJ~qSAL06!9 zZh_cXExQO}@nv?btWH1>EK43iHX@9nbSlu;d#lKIPt3^aKO!WjJum^Y1ptCc`qXLo zBP#5a?!*U4T0kr?64aSBX-hGeEJ2m$jvZQkHOs1Oo!mWWf#Z#>vKm!qo9*(>PR=?i zRL(p`uRo@14ClAti%JVPa^!T_ummCcfJ;q4=`?@vF9m&9bedtxZR0+0K;Y_&IpTJF z%XVk$K<8(&GgVrc5vz*N=!WL+4g{JxjPQa4`}_9>-Ymh|$Is#=7rjsov0Cd0Z{&0Ed4-2HT&C|K&P=ejM6z+iSNJ=RNFC1I+LyzXHk~q%I4jw zQ%Sxw?KD&)9r3)+gLEc)@e;OFtl2gCxva8&i_kO5{%?+A$$nMSQI=^cpD;4=qLP$6 zy|6ITKSi>1f`drN@(SPOoSa^NJU?<>>;43~@kZQS6+1nj3Zj;FP|h8<$BYS*I%-Q9 
zum5%#@Q5PPsbhrkJn^E@!3uKcTd61WY3(d9V>)64S%3VUTEaTA1-akVoc85K^hb*S zGu>{;$RvhKG?o~;smd3HmJEOz0243gu7c}fd7u`ci-`$d0hJmKf-+q6>f0X`YIbGN zOMf0j6j}cac(|j>lI@4F60g}mvW0u4t03JIfS79-qOI-XR;YU_OrVtt18B%V2lBHh z*?ivpy*q>m4XwB^a^Emi;gW16BV-au7wHL$J4e+~F&5UNV~&R$D!Zj0-|pT(M`xX zG;6*lrM3}PCQU0`euG87$~NoLeT6fQQe|_i{A0U93ihl}(LLv!6>|QAUqgqJYrlW5 zq$<=AGKU$JP&|>g4FeW)Tce&>Wt0F<) z#0@thpYlmmjviU&CgXOc<8J`fGV#2diYC6ROkzsn`Xh3X{$YVvbv+$1GMdkVQY0Scw(Fk=`Xv~6dCw^m zkd0a%)-&hHRj`-iOSv#>@94F!KNrmGUsEPhOo+Hq)$u54JY7(9!^T_#<26-Adq)d` zY=j+TipT&hgr_#r@OVr6M7kI$ze{KzZpy!t7*Cr6WAiAg1yWn+S_WT zpWa?}onfPxNuB`(dES1U;WOHHlP-ivaB;!G&|bMMl`g+xVz)1}jA0VbhqE0e%(DA! zS3cewk56l?*;;%0J)!eBN=|OZ1-(b)6aQUw!)lEuC3H&#puEp7i<^UZT1g93o33Xrq0>DIHLwKTIDyLDhQ29+N8p`#S(W z{7(-u=-PHty}nkt7-Vo=)>4=77Pw68JbiPw5b)(uK1_@@`v?xqo_T(WU(&F-I$3+G zop_w0ux|VVQFI{E48N$CfaXk4c4?A@2(ax6#V`r3$!5QcHPxBDLp}ObyaM1xV)rJC z=flIA@(VWJcg&{T9WxSro$+f$Zt=p7l;ixEJBIgYWkkQ3fv$NliqpB^_6uyBiTFfP z(fndi49mhv-$}dw&1ZfyXF-)?m-Nj7eOGH@j?})FAe~Jm_?_*A`BR*4eq?pYZ<9`ii z_$?)|1tC7myh>l%i3|+k`aZW0VR_6r?Mtr2ZosyRv(-9WwlhQ#9s~$SiJI}Ww|EbrJDXR=-;K9WXsySq}8QF)hSN`h%%#e zftvjp&t#?Fm5)#^H44ux=?!y0AqWZ;G#`3TN)n8DM=72J36iDyN@5NW(ViD zqNnRkNI2%Rs}>%2L%PVHh=3kFb~YrV$&?X4 zA0F**7xvrgMsMHBwKA9&jTM|CfxTJxJyq|OdZw(Tlv+@6dITK{wEz6~I5*fS0J=dv z@me2{Fr`G^Gj7@BTxy}#`YN2@_*w8sm~ei>TsMOfWQ_WwhUTP2+{+hUvho|}-Oy3U zP`IvZ#!A=ES4%~pu)zF|XuWf%0fJv&(Tol!w-hRPSC}y((Lc~M>8k)ukCC_lg@Oxj zV%DXO_>vxe`~|N0`l_Vbk$?#3^c4RZYgr?vg@ilhfjiQ8*T(a8-Bowx0mXf;CWl5& z@wW%moVCNXpUGlY_6t@}0lhrG{?mxAF@pB#;6nXvCRV1Wcv7^|bmUaN?v6{{NlJ7C z)vZKue-T0m^C#-A7N$L_L|CZ2a&0$#qxTPVJ_;MzE$mQDK5U4HlirjpeHDK^CCfE; z{8kC-Ks(ziz**H3-Bvf zAY2Lfv<#N{JPMIxH+GBt&Yk{+TX5|Uj8iB6jnki-!}%J5Q#FQwjPqBKfNny5n>_ZF zYv>_ym?N^*qnN+d8pG7UoN*44ab`f9i}VjOYTl1~gzS_KgXdQ6a>%*Emhflz1Cp=S zz;$ky``2maKlW}Abs(Liol8YY{xe#pyjbCL+X}k>52QDd z&-m>s9>1Xj70~X>{bzVI%tR$y@izE)G1A?SC88h9BacEf$TR>rQl_F#&i~V7HJC~m zM{i&j)$6ml@0)R5IXFpV4eg)kr=*aV8-S}xKM2E^Q17~6*c4}l`#HLy)mf#0+IzRw 
zBhOl%{7L&VN7HZ>fUwImDl}LA)0UQ5B{}W2-gQlop{#!_B$YVsNHq6<)pYIQOmKhP zy&om@t`*}&cr%ibB`uZP7GBAU)m&f6-Mr<_&n1%d3fm0D+*hs5h;3w45_6es?jg5w z%Vl0dEb?3QJAa(#obO-fdCocC@ALV5&Urr8)KfYs+P(%9Y(#UR%s<*-N+CI+SSF{+ z*WMH<2?ri>;8hg3+)GQ)pWk0+Q);ScN5V7JtMvk-!{QoVQPr>i+pj+YpF^KqS5+?9 ziivwIovhZq8b3tuj4Z zr&JEHw}|QNC~SW8Jmt|?^K}cjnTV52hng;ymK?d=d;q}9dI&XqK}!~h$sGWUp)4e7 zfJd!uquUjs=*E0ky@*N^kOincn&eQEkbPuTApX^;jwo54>iZw6(L^-_PxBqLT~+X7 z{g>Jww;~%$Ka{~?puQY(WnfX9oS*t=SlfV|e5LIWiizS+JKA_9kW>ZovlsT1L+3{GL`@Cwx=KjE5B(1} z>t)xMRK>$~uF;Vj^&JHgAHieO7>AP_4OOc7NjU2CP9=4D@sTXmMWN5I)j|S^nW~KL zu9S9JX~5&5UCccbJGZfOVT z8p>F@w>;Th{p3q~e+@05ow!i#8oC|d_GX-C)ZFT!rgCiAeYbNPe5#Lwgatopx&I&< zN8M-#Y#7=F(kE+UL(*X}A>prr2VC4J8mcfYwcoA6#;*i?kH=j2SxDb)eL5(=c?2kg z7*kR$VRgrX$-=}AMwguG?9IAWa}21MskljBG#LhaojmWFojje#wSGQO+l$avg)*mZ zAsO#)3g0qk&Wz|+XkY&%W9*R5byk^9SISl5#x4wO4-{OTTLqJ^=Wvt4{mgm|iJ3h& zryxZYBB_!-Iwl^XpuJs&scbRm;liIGj|*q*Nq$xqPLSq-d)ssij3RTvc+~6I%M~^&mFWK3++tnlEh#g^3FfQ?)>*t*KR>!_$ttgw=cf7iPTPtN ziW2H1j>Ny~G2}Q<$(YrTob?T%ITOe(A?Tuoo6wv|n4CD8YsTcZ{-P6NvX0?-ucMJpCV$Jl!e}ght@K;G zg-zr86e%#5TZ8?^R?OjGf^?uGWWm4ca1TSKWj~<opEkr*!<@=aLh93b#-$Q1 zMWMyFFtql8%V$sIxf0a<-R-Z+RB{c}BujFzNxGn3`zxRD;m^j+eJ`qzmp3xqQ({{Zlp+ba`tjZr}P# zw{#S6&aH7s1b#`s-hJ~ZTSfVulr`RA{+Z&VYqBMZz7vNLE}>v$RsV}ifBoV)BCyQ| zo`h+i8;LLdgmu#!LYO?``hI>&M;(U}Xkx`oP5WWEl+&H3xqX0zDUjn`WGUldCVH<5 zdni3lOzaZMx=$i(Y%e7gV`k|Nw=FIlXC$(^3EwyH{RXts=n@jn6Ty1hDBxE*c(Oix zl;iBxW17@!z5tUaQsFFhwQtLVGjb>j3zX}rV_!-8Bildg&`(b$Lk62S zn6Vh&G!~4yhs)9;>vf&9$(8qgfXnjs5srP+cgE^+(JQztW#5K4-glgYg%(xmJ&8N5 z2Bez$9D_#%cHeo6IpHnE_7}Ct2MOVkll=060C?T8rA4{+2%YpXYVPqA_2E}4 z%v(JcFI_{86ZL2?z8mf&Zntx0=-@u7UftRRx_n(0Y3_A5&PEho5QHqhI4*@09? 
z4D5^a)r`D5hhdE6Uxf)BdG4V9bf@2bN1$;IY}zrLt{*WFYzEy#c%K v{%h0DLXD~^H+*!ixYrKIsl5-*SI;&Pw)L6u?L8tQ+uLGkJ^ms$_KN%u6QzCe literal 0 HcmV?d00001 diff --git a/assets/rancher-monitoring/rancher-monitoring-14.5.100.tgz b/assets/rancher-monitoring/rancher-monitoring-14.5.100.tgz new file mode 100755 index 0000000000000000000000000000000000000000..6a56d626ceb6d3522b718f0b4ad0790a8a2e7c04 GIT binary patch literal 274062 zcmV)gK%~DPiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PMZ%dfd3tAe_I^Rp4u7*N2GgmMnSfPG)vBvM15T_Gm3R*?scO zt3i|KZj4BR1Avk{ww`BiWB-1a_Fl>Npl~MH2OT`)B=(OPH3=39K%r1or~}7UHYYqO zSw$t|w3-c%=9tTLfy?4I_w#Qw8jYUr@5BE`qfz_+Uyb(nf3v$k+TTBTy7%;}C%+l( z9*lOM{sxWissEM#8X+%p(s9X%^ zq$rb20TB`0ceRk5;1Zp1_7lnE=CD~!IF^hLFPaHS{EO~@Y|AkkNZyda7D5@8xL~t1 z!%`MRHNuT#39W?0MKMO*9g<``|GLC7oA-X65!ELpx%;-aSx!Y=;6QTezlwC0tNejnU=a z7OrdebCm8sO-EZfsY#WSDx*Y3zN};QZnM6iWgqwx&fYJU z!H1Wdb)cK}^>CWaN!}F8r_VLmtS8?*>MdZ7ILEc@>95;lO^-bvM158)CokXEjIZfU zwM&R;xOt!PXHn1mg6yrQ&)!yR`tSdy(|Iph+7}yi(IrUrw4WN?n+@Rh(4l&~oCu@0 z8+5RQ{P+dgXgzMR9P3wO*RnD%u z=Y@Ixau^Pn?3qc}od0wy?OR~wq!DMofJ=uSn*41q5f-J5W&q4mN zsBI+yZ{A8!8`C5dv(!r`SUvdW>)mwsX*x7Grf zC^642wh}~2Op7s^QC#t6LfL-4x=9FCa^zgqVoh^8P73P<;%2T=^)eQ6hE*e3i4-Vn| z)=~*tfI0rnI%a9X|m)do+^mZN$m+R>r8ILAFBU+1}pj zJ{@l%WaO$blDr{XD8OwDhOBLLG{@D9API9gRnwqkBK?RUXW{w&l5nm>8OGtE|2;)L z9DOUYk%2JLu~!A6!!qkpDlRhsr3AY%dOmtSQiBk+#y;Z&OJWA6Ci(4OUZRsm%#~yt z8n0H&zd>vm&UQ{NlgqvIAkpXFu*TU1o)O_jQ8!{$ds_$vh9jWw%z;Wur+j1uC6zQwR-5lfySsXp1$o6XT+Eq}Zk-Y6y{AM;zKoBJ z(GrwZ&I*!VIGGb{gSykYz!eE>kO)au5bo`;2qlP2rzDeOG?EHadx6$0~Kku2l+USo<=IzOGHzcOQOh-eHhv^HtzRgv%F{kI|6GZ0IRuC`1u7`A#XXzT4z+ zT7#Kf{ktHG&g`&SQ-vry#D2lQU1> zs3tx0yM@s9HaZK9XhqP>scvNUZ9LFA+K_YWL2eQLf^-K)OO&((M5=RcUCqTk+mhiLTDZzqe0j5t+|b}pew+hsQiZi=y_B$!HK!XRz0X!tp(D6^#+Ny9WILa)jhd*7V5{KS=ChAwU#|Uyu`G?6IxKYSk~qx z=WHo}(}J}-Er@JA@m%0fU8JM;Z&*$a)$nOGTiNI>$r#VOtuzZfoCsh}eXr`ejj(?4 z))GeV+f&Ino{|1{u&G+Fyky^c9(dJ;vK5h6j9;{*t>9;H(bxCzdnW*SeV&%?Dh&?b 
zw@o!+>;g5sKs6^*`W{F&B4VIKX!dT2L%bh%c$*(SrSEBFZ#ltl$UscXeA{3`@+Ko{ zV+OJmkUbI^s}=sZuHV>KLbH%D8l;a0W-w-Z_YI)%`x%-Z`rw1!_F-^>`pA5hYY#VE z7=ZAtr$V)nHO>ITwN}pqj-t553sQ5Efsfi1UW9&RStBGX5x3Ng7}qe>-@7fdxH-UN+=-z$=Hlbg*06KXR-C4P;?N zdwmcar--*?Mup@HXIt9dMt{>Tka5HYyt!8+kMXn&9i^QBswuZ%3xP0t0rK)Gk?I5v z#5((-KocxTj##y^Q-taJ+=8&uFM4@N_`=PA(n7UNG{=`pP`IL%msi*njndt8^m|1r zjWE&_%mI36cRB{S!McC!r3dM=N3ORshMIcDaZV!0LJcLvLF^zw$f}uFpGS<(I?wva z7K!PLN3y<|ijG;_YW9k~X0kBvL)Tf`w`rjpNIV;(%iVNulxo`#s+*!XVFk?=O8R`m z?Lo3IzVJgNyg z(mNWO8exrHnI0t5>Lku&$A9+dt=KU-#cNy}FW*y=abg0$I--_A;&3+OWVRX);5YWQ z5|hKX1{Ah%&Y0cLFx+4cYEi%J+;r5UK5U&l4!6^qWOTYfHOs@}$v1c#GJ)7MlCw+< zGgf7!mSR}Lq7Q49CvaOr!9EC{IRBvDI| zG2?SRR1gyYm(-kIVo4Hi&aR=gZ^7%6rYOuFG`5RvcghY700m2y1DK zw#ZxAVHkcm1Q8)rsmy%pvn}q#j3K_3H1c3gk(^^02;Jz47KJ`^7&7OY67w{V3IV6+ zgb6_>1+fm0;J&!KgD&@Mr>)>Z-K*ic2y3CAFJ!18&hFodUb0nqBEe! zXAcw?y~6fkTQWIcsITgHt;0`ot@|hy#_AqZUJl<4E;AtKXH=hi*g=Vd^nk86G zfbU&*Zq2+(iijwZ3Pj7Yk%}~s;#g+rIh;5^3Y`4hP_C^uCojHr(rZCfGyO{&eRAE+ zO8qMQlM2xgG2B0R^8A@yjhf{z9P-}+@_I=+;sZh@t&FZ;kTKFWyAr>DS7HBjV0EjA zz~V{BDJPkvm%aRC)s~FF47a17S{VU&@UI`<6`4rJiU1~OjrxD0M6QJ7N+NQks>Dcb zszRp)&Mpki={LIfvF%d9A!CmKC;{h_#Tfnl!B1@R;D`3BD@^4WJsFAi3vcv3DE&K5 zYJz3(^2t1WnGp&mng>E5np8~EsUcqRz~N|`35TcUrz<~02;QC>D|9f(1PM~*=Ss*c zE(5{DE&`I|y&R(lA3iuQYBT^~sgXHT)nb5Bbbb9G{1Dgl99)I}s4=@DlR0A-NyCd| zPB{6-Hg?|G!I;3;F5=%B_2J`GmURI)T6Xae^2(K-N*K$zIz3q<%_0QryKHg$S9a;L z_O9vf-rT)%D_S*0VMPMKBDnm5(EhQz!ZLP0y{3Ot?{YSCZ+l@zG!YPlZxPL~YVlyr z{90k-1PcLdY$INPNuUu0x5faVTId^z zW;&(s^+`M-s9-oJd5TU6F+`R+Izo8DE)BO+vKySqI|%1FVscKnfkgz+EY7ucDtfb{ z`VdNnQ6d=J)`W*kyDzQ7W<>)RL`)Fh#SKr=PqWl;Xhut;S%%^D=Vti?%=Z|5OdMFsd|kg(IofB)0l@86xE1ic^< z(?b04*uDEc(4%mHd8u@MZ9@XqugOeuI-3!$L_HMFU>f5@r)Vb8!-7;O{ge7y+kw-+ zg2?{}jlz*Vh8D(-J$@XV0}2#}o52XmDdUplCzx^(!a?Xi|Ih!29)I|NQl&ql>+A7j z^rQL--Jk*xz^yPa=c!i61Z(l{IzoONJB4}U))*I}xTHR%z1f6X4 zj1vF}v=OB!zPpr8ZcRECYSJ?eW&zy!9L>ew4`01}dxjDieNW4P*Z`wK^nKrM)xRDF zJOsW|xrpGLnum7vyKR5GK^Hb~J?z2QzQ&JHIM$w)E5HX9uT(T;iFs=j`k5i7HLW-7 
zrzAq)Sv>2n^|PLSdRlMTPmrY`>gz?2y`bCQlCL)G(_F17c{}RU-u1nE`)d3c{juF? z{MpB`s42(AdBr3-uW>&9W*r`2;V>l7|L!d*)v@SFf=rH z)H7ViQQQ18xS(6jM1Nt%x!EH0di-+YaB)7@v_ha={!DRN{CMxeQL-TKfa#o9-Ok|MVajtVTq z8}BrA8eAO3hO0@zX2ZSF-e@@5ACC5hw5&P11m`M|NCpaq?G@KFp;Z#xwJTgBla^|p zsm>ENO>kvDHE6W48*6XHt!1XV=q}w}5&fA7DfES$*1IlguB7)VI@aJ36?U?tOD?!X zbFOU0Q!g}<=B&iDf`k^fvlA@m!d~r78Mi|)*TV@I!fPz&cC|pOh6M>hpVlNxK>yrFH+0?Kj5-vgvqeqN!<0moN<(n@aP++t&D;c?y*h1a@Er^FaY%yO=4#H-Pl>Ar z7sbLEQ$hR=d~Cy|l10lE5@2ac4wpMI&QXah?1(J+xt}fCGrEu^zTW$YXjr;$Zy8s= z#+Iv`>LuHWTPnQgh0?#H3F~aHMi6jQSLYOr?`v;x5O?1%taZ+`4v%iF7eJkyu$ z$lcEGr3dL(=E>6Ad3bLx5RX^|Hd)(UQyq#ota{6sjPADK#XCW?L&|6=ckiWf*oKO3 zaOKz**wMhOQsP>z3c=Yto>)%c!o}VYY3R}Hnl~qezcp(4ZTt@dV+Mp^&7zZgbGyi{T|qVHIvI zmSCAePrqrsk|ns$?J;O~Vz@MeO~o;P@U-Ko*04q`nt%o3_$1!AjQ6-)Sf32ZP(g(x zN~(%BvCVDt#7W74XAZcAAP74|iXPB;zPdp$ zfR#+#u;;<6fT}(fogGg>Vc0Q;7{aaS(60MkOhLa&5UJLMimB~9aIoFpMk1RNb=3a( z?Ciw+2t_U%0UV#GKyE+ELg zl<6XojTl-pTFDfIyS4o)BAS{N?eQGb#TI(=u}%s$u}mY@~ z(b&B|A(#_>#x6*ub5(tb>isv3>NV^}s#8fI3?ck*K()*#J#GzavIgIZa>H(g?RE=w z(DA%V;wr~HM{M#F$>e|jpZ_Zq;o%%hoTAry>85N=@Y~SGv4kjsWr0o_$++xl1w(7D8!SDRhOhPH;(zX?cd_~+WfX*Zn(;$Fg9?KtvRdl3`?)pMu&3Ea*@U- zZ*Jj6XZkT%+}O13aqGG8IxAO6IcJ>Y5UHKuHW+L28l`)?>Bu}1a~v@db0n#8XoU)* zF|7K{iKtnnJ*b2DIcyb`j>mP(g5BCP&4q1GrWL9&_o_afRcbKkq4w7SW*s`lJc8v= zU$cr>_0Qqq%?05Uc1($}9m{H&ChcuuWN!D+^r1A{AYih`Lr>H6j*-sA9BXt+)>JZx zpL4Qv`0a@z7_hL(nR3;Z@cm#I`J68=l4C#p7A-Eb56ppHyEiV zl@iyj!DbFh&v=dTfC^_L0ONdU$gxdsIe6zW*|~;svdNsb{3*fuUS%Y;X_?_5dk|fr zJb)<@4*!j2Cz=@R3U?aajl@KEIwF_}z^<)82wft}_Y_~SvSEg$uVJ8)gL8ms09 zUg3sxh^{4}2&nsgkS)M`4SYt60NP5SksIh2HnyW$s8t>qfiAIa7;isI_KeYS74q&V z21D7)_q9^OeIgy}&JFo;0-S=EVS=3FDte(&9!Jk0HBW@+n%9YrGTgq~97mJOSwRN& z(yKj_H=Q#hRt2L_;K%JjTJ(M3d7CcVny(o`PDREp314W%hZZ}CDQz#Clk5VqtIF@_ zH5BW*z8UE{@XHC+bh8P)I%qM;y2q=iwmX0ulW&CS{ZMi1HhEQxWN+&x^SySW9SLhA z%k5{bOT^vo#_lrBj^TxNc6`NVO2;g*F)*Tcf}n!Urlk*)exz3WC&4Q9VrC=G3pUdq 
zj!pZ(>M=uw5^;N@(k8?+?!3#a3pRVDz8!f&N`#cXapktlyP_Nf~jeDZ!~|PI3uOOa(yEbg@zZeesHKQXt$>V?Lcp(p&U9iI0i0#2G@mw;GF~K`<>N30&9=E|#C7OI9=`IVvzM&y0NUcbD)g zBMLqb?p|jmw@0d|zc`@I!d|xLh+4Ke9C{+kDKSelYqI z4K)AYgz&bQGay@S!$$AkzFM&^ZV8={9yL|2_mg%ysfZd8=3w8@PBl{PG|ovT6?D

9Df`5Sm>3rqj|@RaXe1&=(4$NT88f# z6_Az!?16J!<%RkMo6ln02r_Mo9dB?8o!7xV#2BKfK%8803U&?vsDrum2+XqWIJFRG zitj|ws6axh*#@(#9mJ%OMtOACCPX+g(2IdE%IOqBzhyMw*l4I&Fc=xtab%J~Xzb%% z?|tCL!QJxl)SgqI8H1#jNt*=&v~h(?u}~AB=h)g$gbS{kF;JCtI2Pek#!Hg#Ao4yV zwKSP`3L=RG9UCXG;v;RX*qmPa0Dhx3mbNCdTz6}epXSJ$+17>$2RBTRUQ%x)#YIAd zqHLF;xy0b)#a6doYW)jZQWL}G9h4=-18JC8~YL^iHSP>*}AJYKIE9Ne#YDGxQS_@q7 zurVu!W)0@Jk_1je>**SyW>A`Ap07ZO3iQ~-hUd3JsISj~X5Tp#z`5(ra@YvSinU&7 z-&xE2y_D9r79LiT9-5EZwz?Ri_xM$OL~l~bYF4n>;xA+oUc^Kn2IHav=*LIMBguBc z`8e32piCpMyuJeHf`$1Iz_RP00Vg=WP4|X zmn|)Al@8~$xK=9QJ(A#mB++Gq{Cjh&SfR zOBPB=^fJLv`O^Z8Rx3<-9A5Rn91SkWV&I0+&0lQaz(kDE0RHbE_hwFBKX^_4NYtEQ zFsup5)i7=`8CQ&76b$ELsCX9S?6L3@4L=FswYc^n~Rt+#+H7j_G4u#->cej+T6t4>JV0jsef`@g}a#`sp+R;mi}@eFBNh!2r-> zNjGx0^HV_uWAy}Q7d!6oq!Q(fXa#0Ml}<3jiql&;6dj+q<$1%TExaeR0nj>(M!mi} z-T`px#f%9->mYeh`@XpFqi74q&ND{nWN8A?>L+^{>z$(93I)2Fl2oM6vStF@afNUr z=cJMb6Qlm9c2(zcv3&_*>+q& z@g)!v>u8QTEMJXUs6+!g;gkWF0e^C&#R`8_thACyQZmg014kh6`H4Rv{nG>bBb$cv z6cUWCo=q&1GFc)JMsqrw7mL6G;WB%Ma!=fSJWj^J$u^WRp=V?(!p8z3w=TI?Wa0;C z9Hb8OOTr};i`iNLXdt7SYOEBHs{_3!jrj$Hpm~6svyw-O=vz-Dl|U zyFZ|Zbu%ew_Q-oxrtsXf@r)Rf9;KAXY0CI)IG1HHzdFpzMLqP{n92R_f^ev+XK!x6QZZ0y6EppEd{}uF*Q$n$LeNe3s6$s=dI$jN?YwQQ_gfv`KX$!F-iJr zuC`H7-x$)s>c@RGT>DbDTd!wR#{Pj)KPkk&xBVJiQh9?stsqJ#(C!pL=#5V+YO=Gm zoO8xQIN?)+B@mhk(TbC(IVl&Y&NinN)h9Zn>$HbEZX`R`eCM3;*}1-R`zs|^#%ih# z8{62ZvF8Pw(dt|w8M&i2a-QOfRg01}E31bs?I@jKL9-Q}@Gj%M&s2(Dvs?#M*bMzX zTa2}i1L62=`C`tlDtjP77>QXzRO6XN(tLJ4XC*mz-=5c4&c|pd%X;XMaUo>pqeOj_ z{v^y@fEBDf&TozG2ad4igyg}<6m{4%xMoX0-GWO>uFwlN2v)hbFW=V%82>qd z4YIFqp`hTwspMFanZZbVE50S#(KJrI*qJ?(3unvCkV8ql?4)E2r2$rV(tG<)`@mUQ zJ~S^yWnFXyiq3bycBr2EppuKJw2+5=D)4`s>wpbKHQLiw_I8K`!jOo{8CU%~W7x$N zBF2NL9UQ9^KA_{B5WGh^^}rlku&E1|cWgJb^;>%5hZ|@Oeu#6jYy`Hm$s)|o>_d+V zGL>jT=1RO~l|`r>q-u(0n9?B7XH3`t+x5ERJjoVr)HV|%bTaSl1PmnlMxZ-#5C<%; zSsBfW^O9C4Scogg7!H1Mu9kK?GePK(ZMlUF(`qePbT85u5!p?5-+~y)RsH#gTLv71 zYYKv-jXCUZDhrX|tn@{boigUCaZHGkc)TFmgVvIy1F*DCn0UQO0)m!5QYQr59$-P{ 
z0Tg8_a8f3gy2g|FA_@{Se0xn${H%8!K3P1x>29-7csmcRD0o40l3`;H>7I{$=Wn6i ziw!`3xS7|wTGpFYoYRula!U56t@p`uU)h7Im$V^4m=Pi4vtQ!YeM;Zp9A~wyJExLk zRa$JG(u(9eK2LCvn$74fF20s(r-xD<(-`Yq$-qZ*%R zUP5H%GzlS!p=siLO_#yo1n1vZ#Ue^w?b9s>+WD+Q3j#f?v4B8Rlg?At#iI>01vu=e zkVZQIjw~osn%!_afFIRKQPX>Br?)jfVZkq8NLw}*Ap@1x7Hw3|K&(GhnuN<>8$J!r zU<^z*daF%|InJ1QLvkv$o!G9pda9&th3|OF+ka&tdI}G3YZc@;*JkT&Cjm!>8}+ye z>+5{@p!B;+PSN1Mh@jqCDlW4DO5c)#U_o3qrRSsPBLn8rrVQT=GPml})vmB;CR*Y$ zB^zMNnB8Vd_@D_X*i~=vthbox%7zN%(Do8Bmd!$ApN-r}dH2BBXCn{p7YK|!82N*H zpI%3Baev77EbPxlgB)D*5dOtxz{ij1CHm6kRJhAyO*sTV)rZ@EqoRvLJkL*pk(I$)X`jsO{ahGuQT`O5I)rZO%)?F zZ}b$SL7cf73hvl6rsuY7fledoQj@{4{C<%Fh}18S+H^}ANjyW>*S2gaL?HX2xg}`P zDsp-9Tbl~$s$P<1tQe!SqZ9KqvRgaw;fWE>B5KWjJj3BvmzWPToXtt^_Xasu5+}a( zUgNYbW_IJp=5j_o*QEMaW88)dmMjRzq`~c%=#ZBAEKYXaL6MZTT)d#(ak@>2;XwU) zx@zV$?S`)KKxUGZf!mC^*X>Xvf|ETbW3>O|S@29N4MFo8MCKaoKKGzptu^%o;o3^Y zDSAz3bI^OdkDKAkgr}S_2`MUd`YQvJR-xT7P{0O51NKW~>c3@VDx8EZzqr^d&bqek z1+H!2*tTaK;@a+Tea>)!D_nn(X|=c1{2*LYL(8@p>S0Y!bw$peivS;XZMKzasAVrd z-DV2{Cnli;8fIwMwlaG|H%5OEDKP8!n2+e#3_4C__V zcfn>T4@8tg$X>jg1>6*L&Dq3WGOq-z8*U2L9(=0-JGIZ;x8`9g(Fw`064kuoXn=#S zJuW!G`NAmI@S&sKNQ}^U#J5D>S)|PQv77Eb+vy@dyxfJtd%K8(3;=eFPOGS&lUVla zf^up7UPl}Vc*jQUr*E-eujE2YSj3K2O*a#f+#;Mm{5wwmasI>aAN^rtktpY^UWd(% z(J(ct&t+DhHv-SdxnvR-AMNudkDr6P;5o3-k6GQE7Yyg;_>yqdWOpPU-E27bxr};i zeObg`X%X+FtvDG#fWw3i&hjn~I(o9Hh2#X6#$O-YYr#LKZ!?D!;H)HlYLDyGPCRNW zFJYs9J`F0^8(-!moD+Mfe3_MSR#uDRU-;+0I{)Nfc7xdsYwSQlY64%*7__}3_r{jd z+Uj%c2~0Xm09vb#Pj4>}+|cZ}w9v;V0#RphpRxwrt$fX6Z8HfCIJ={CcYinCPxtmZ zPo8w1?0ZagICbv;Q`eCWk*5Z{=zT0tm% zm~zv?u4e6Sjp56iSp%s59ww{1A1D7Jmj_E&c*Zz+QHc~{7YH?;u2dkVIq{21C{}Ko z^%?Fu&J<(xeD~lTF1tWvMjXT>JJFrT|KpI>!wD}_4+&EBRv0Z%>DR-{!|6u+8F`t~-msgK&xPL?Nx z-`i(CT~d%J;tO^EI~}dPB~!JGs%AB@{>wH5+U|(>^>}F#E`<05o1{@XAkS@@+9%UA zrTcr&p_TDSK9WfP?#3sz1*0Hozp*A1_VY zE`!kG$2*~u7wI%uL1_Pp|BYTn1;Lmi6U&B;$Cy%)Qfb9>F#A+*v^J*&2A42N)k2(&6 zvPyiE99Kfo!lU!__Zx5T$ld!3n_I%>mTkI-*)40disiXY>o($Ht_xc{h+sT0Wsf8a 
z%fPe_aOY7dY}rkd<`|qL74z^OhO>5lpGcVXtRuq0hKV1#oB|0-l8hBvNJyu&AUaqo zHH94$AlpQ1K41UG@L^6aKh~UOAH_m^yu^H1(8(}o*#+Ul^!M(GiK!W;zkl@bcOQq3 zHtTFE61k{}`1(H{;zqKm_&A}}w3O#171_s0J^h%?IBV*?|MpQ+_*8t%$)wT$zpN$v zPnPvme4I9dWU}};Z)QXmld1Sf1$+MV>F8tmb3+>Pu@Y19QE3|mUg#mI|4hZlT1dvp zM_y;=HLb~1e6)8J@Ml4*=Dqss=LU&?Eo1)86`s8`4lWZ4Q@46ani)XJ+ zQH3SFB*ns|Gn95S^{A-`l1hZ7pG-~{y+K>qNLhia8D!n5FJ{AxFKP*C*M{5m#Y|^h z$CYu@m4$dstH0U|G3Jl6SEswy)99UmRH}0ZHc&06YR^@drjJ(WPk(udYOeFY2KiT& z1VOaZFwS!{rJP(ru`635!K7gUV6{yZgL9ku7+sOcoUsex4`ox1e~g})Xjx!B<^$W_ zO|=3S>ahiKR5=VKX{dloDSU zH($!p6<+k+5u%zwoh``eX24tDU>|1(;B1m-lRgl}{5La2ntDtD3?px%?=UTv8$S-w z$;;P>R2ftAaCC?!O_jT9c2V^%$*d_;%Xns@P4u9#P8iFkvtS%gQBwtV1~lJOC0MrK7b@&5?HWjizXg;+MeulPN3-J#PT|ld5+Y-gJl3vankA9w0#^7Cb_+6xyjQ-g zdS$4+sb?JLM4*R^BRUOB`$G0j@kp;6vGsx>FT9cx69+m)|Jwx7HPI5Il+)QO^J-E;{|Kb6&3SVktCVTS0$*6vJ0)rFZ2_Ul4SD)^dU2EaFseEm4l!Q zH}9qgsZU&ED>vKUbyF0UB#)DYQu5tMh?gnmS}TsTLlO)K-mmdjeNFU5_ax78f+~<+;OT*P+}h zf;u={r$`GjO9m&b&pJ1D(tj(8KAtfs&v1)bd#FYnP7&lD4ky8872V!O-_b$|4pp>J zg`%96cy9ySZ84&-DsiXQ{jn)S)anui0GBqAb6hbvNSA@J>+S@B7mzvCSOr`?!7#zA zO^pxP|8=6~zG9w+r2a=t z$InO4_gy7a4e*i2=$A&vXKYTzWtN(LOyMiQ)%ga+3o%9mixhbFSZ-jG)!{5t%1>{_ z)<%%DmF@9Xg<$-1qcbF?*J0~|{ZK#dHLp3YH)#29c?CGwfNqDh^3p^~OJ7RZAk{GP zMHG%tf{s@w6}(YO!JdSIqi3dKjeU5g(0MWnlbQoJY^^j!(_FgTj7&w>=pgk*z4CSl981FWP~yG!k{|ObBXv zQk^;6)i|;6-fmtD(oX+y+g3wOXdRhH?DON3XenIYZKa|aj_h`ePT&XtywS(prnJ%r zZv>$&YZ)-Qk{)&?X`G}6axu$=rU`KVTV9A}NipkY4T7Q$SdJX(t%RX9>B=O6Y z<-eCL-*A4DtjUxM{;$sB=`-rx%6@v|dqemFc$=#mV@pXxB_UnQvs?*|u zI)IOkU%b^dy4&VKUy$0!Gj>YnOvpD{%85OzZdzsft~I8$7TZ%_8Zu%#b3mj1d>m%wXeC3a4H6Px$ z3P&5lmVJ-zl*e03E|c3Oc}}9bAaD@%D$#ugypwS2edY!{Hvp{%;)?l-KRFc{yCi&p zSY>NY!8b31)UX+@po}v*(;xrOMtxvz*`W20NdjIDU+0w&{POqbGhS!uxy zMFrxdB;~}^2GkWL*J25*BSH+lVG{k+2qh}l3w`0Nl>Yzcku&G_EAv5Z&GlwcUjs%> zY2X2}J9<9q20;x}t3k)0`i50OMYtQ3Puai>+iP2Wt}9Ob!riU2Ro}OX+N0a*XtQEG zP)o>yxoJI~Rw165Q@+n9}QvNLMJUFGOsXqQBQZb@){Hf}_EyZxkREK$cN zN}dd=)Nk1H8k)QZ;){|u)GSx$E^qV=?4-$OMD~hO8wx?ZsS;enn4(J8B&qYsaNB2h 
zNU#!h2Z(`3NkKX~%|S#Uq|!Xf`HAis1om9#pvELc~vL#V{>-&NQ@wSt{3x~hp-IN{F9 zL5);@hOPikG&v5d;P#fYg`^ol`S)+WhSc%k9%Tv$+fYaCrU3wh6o}}OVt6$LJ-{?@ zmrJIXQ$m(d%A{bk;ofL(G#u>@M|(qB)|_4H>II2piJX(f_KIto&?@l&5`}9-L#jEr zIVEhG;3^5r$ZmJAv&D8dl@07J4P>)l?raNt`!f?#xSF;rOZ3LqGrsb!wmw1a6)l?2 zuq0P_5!UK;gTrzzLQUIDq_!kR++4McZVr6e-kXt{f^?=P=V4AH;U%qzKy!BGMhIO6 zRjx~|Z$9!I@Ke&j;$MV0P+U77BnAnHbyx|j0;lpt2=&1=`Nbk4)A zlFXbnlALhj%*|g|$R}bUfoT4T*hrU=1Xh-IiS!OT@pm)gc-?U!I5ZveHkJ|1-e`vIgbtG`#wYTVirFC$tS?<$W)^Ig^ zDpv2<)5r-#q2lbN zdIPCPoQRrLx!vjz$zU^Lu1!v}au&%YE*h;lMSR0Q??wC-+`GME87{!ZLoB%|zHArnL(yQO-={=7d%`!ZS=OA+0AbIzEX|Dtw>q1bN!)dvd@PP0^U2j#&H&BhWW>R;xpy z5;?E5y>xHi#hnL!Y3K%y(l--QH*=NtDnf5;c-v#zSdaRgrp>F4-?VwY%yrrx2toD* zo!dBnpf-KY3N~9HQ7b+V8&qNi6(C!%qVEt;x~tPNCV6 zZxx9SC4PT;L9Xm2<96`jU(_VpA8Cx`KUn2Yeo&gGsz3XG?avHt~8khofe%o|znW1)2>AGo5pJn$v!bz~By z^m-({>Y5qD6x1X_$K`IxNyQ{_b_FjPTd5tMzYP26m{Xz{Mwidvid7G!9`e0+e^e06 z+sDi1qwR6Q?R4$;(Wf5#9kJ=13rBqK-oEm_b3sp*&2WOVx#D|iA5FdEv9zV*RD8Az z=Nc`nS_IjMjND}NxZx_dK0Ef2Q6oE(=)V#@pJ`8C$j5K}XHBG)Z>kgav<-sl*CI$% z1vW#;BIyIh))`tS2l7oLzBW2ESH^rrE)(szufRU4H8c=U)jn_!^;`(GJ^X~=j-;#; zsqC1Mi8GOgAQa*;`&q*5q-F<7E9_r(cVf*v6h4*T2V={34%6hKX4aY z8?P74vzi`n66Q?>_GiEpojHGLZ(tQ`M`(hxb%)jlZC$M(B7+!1v_-?^jH<|$TMyG+ zoXJx^i*BLy@$(wjt@R9_|AI>zbh~y{^|mf?Er!uBGY2CYDfr=^)+AFp3fzq-WPaA( z`K&A1Z>Z^(YaaUQPnkjKReX4jy*SU7wZ*ggHZ%eQD=>+(5t%D_ZQB z&k(z+{3@qwptl%p7kGH3^eN`Z=OnU+gI2M0jG8~%o1)8XW21{b6tJ2NB_(~hHu(UN zKyJTr;^48g?R?P`{$$buD%R=Loc*Z|ugr}Xt8tn%S4yyj?GOZ-U;!N6L_pddI44Dk zyvcRPF#w+^4;O+3jw12u2RR307UJf}4&)6j-U%Z9Op5ZaIta6MF15+3hc=8IrY&UG2i==m-_K#%kj%^FKciXWoXzTG?w|7Mc!Dai@5}IJ8zU8q* zwxr{A&Y_K;p7B}A(Q8%A-N2s1YV{t6dI{`1bQl=bp| zoxFT|{QBjav%^>8&|5%vp%WTNPA82dX7dZgD#<=2w&)Fmr1KbR<`io6b9Pl(NG``Z zCwh8wEHNO3I4O-^scHIWBr^jwxPdKN=W+XP-9C3!KYQ-oPgss{C28W4#bvm1 zWjkOCy5hvJE9(Mhv86d++@S@YqhN2$P-VRHOnDXX`pr_P{;RGMg~8GEnDmDcu~B3o<-UI$LL`6 zCmP_{XBE(s0QCjUFk54X3Oz4q=r384oHpec>7As*4TU+Ok}>@?p8joR!28qr=D7XtJ%tYL^ht>j>ETE_g~uOTN)1+mj%+K>suH 
zpjxjn+JEw_XVjX0UPDpT8xLK77P(e<&^lP{$i(eHQcMVeN35t>B`D;rWIAJ|Xq27@ zRRW;QUK$uZw80CUY0YVYO1$u;6P1iD1cm6wh3#50G~ona_;PizLq0l&&TxK7of>>e z$yKZc={%Gblcri@1$bKNomLquYbrP0lqFg1ny!1Fr=8;5m^Sf^5mh z`a<7laB4Y8dxcsS!axWCWcYjvZqR6{W1qq($qGs;36%lO0~|sjTpBnxgaQu&NEc*b z5xNbeJ8wP^6B?Ab4lpp0hu!c+2wl_=AEv*56hPAl^LgPzADvk(Ji^;? zi2h9!*BE@=MeE_;|6}}zAAbL6{P6$H1034Hj~{lg$BML9BH25@I5rdYhpaku%oe-9 zZr03AMCX1|(#I>O!oAb?73V7m!$J)3rR>~l>K~0c;9Q#n&L^#*QFhMBjI!!u&l@KC zo?@QO`<`3n{#-8VP5S)@tH`6C35AhHwbYNNp`_3&9%uo;Mu>VpYlM+HsS2OcfEG~9 zG%j>1w8yhk{a(JW^%a`+0OO(Cu4dMX2fNL?jgEBbk2u3-KQ~>)X|+poSaWn)%}rpz z?$-rb_~7uM#+knw*32k0ks5)_M2FzGY?c~sP(MYFJdJEn4Vas4r{E&%YcXF^j6_YV z3Sa1K(bo88%0!!U$3_NJf->NZ2I)fKXAr8uvHf@Qg zN*C03FsS#Oz=$GPHL{|!+_g%A&b6UZ8BLb zS31KbVaz+xK0t8vm%EZ*hF8* zyPQo}8dnZ|qnoj^WEFBnK3ue4d?}U(c`qtfK^=chJs^iQGzSARfSl1B?TtpGv~7p^ z1SbB-n%h^ZDXz?<&SGpCyU!(v$ z^+bZMug5q3T;dtJz8*M?p+I;zIc|o78ZF5Ei|$yg3~C^8CRxUcF*-XsanGZxil7?^ zC_TY)J~q?2_L^nM$CsE7Gn~!I5->gYK}(JU)MoL!?H7j8?*@76k1HxS+-Wpwx^%bY zAbJQ^l9rDtdRP-~N|%Zqlv9o*C*+aR$a^bl??&*q#pv-#<3e`pbi?*+ff_;7Ph@4oi6tqYwgMwDp} zUE2j1m)t}d;|3U<^zO4>jIlef;VvYdr6$)0lI((18k!y}UOtNUUHtdjQIPzZRg4?o zI9;b4PJP}G>q|(0QnZ*9cr4Dbi~hiwh8_?uauy7|t;noKHqMOGIUXk9R#`&J(NZFU~A*)*c9cQJPD3rL|3ZWNH5Is%I zN17~jr(iRk+R1E9rB{jm#`uM<{;w#pAK#EbQ}o(t)DtG>ae%xl{0R&h(u_o@fidM) zs9ZDXZUTl^Z1x@SM`N}So@Kh^RBAH+=PTn}t`*~g&1NLh_<_M#uX5*0f5j;jGIC)_ zpi*k%we|POjcBO&xs(`cYw80mMTZ4=T1$FKiiI{wxIPRRGmRJtnJaz3y%$Y3cijp! 
z%*o}@w2=&gM)ZillbV+tgv_Ir`!guCV4!clMtkY* z({!Y>;Fb(>(nw4Tv4aF5o&3Qp6X@XzyO8l-f*^sUV6&mV&-po6+GUL56zby+)bY)iGSFovSA%INhoR3i`1qs3ogOCtR2Dp&}1|d|8W+ z6OHWM&Sas}C>jfo-q339!a+#}H;{l1E}VTQ7?U2$&}8Cjs@#}#aw4FhJ(7O>k3OF$0u}cA`lu!vn~^BhAgey7YJ6cwh1>`FSsDphGF*AFMU(k$7Cu(_#3I5XiL3P*#2wwa!o@f$X zdPrX>!35g|t3PpSVtbsVlU=g2)oqgpo}irJNzX?kb$}p9h=N|41hU?I)Z?(+1f#q`~|e9tZcp}9^B z3Z59Dq7S&QhVW!00oufk#&!Yz650v5dax+GlxNliK2 zUWgQOe{-Tf|Ev0<%{=;1z95S=s9uo8GEDWQ2OK>%$P&c!&)$_a-=;bW(eKYN6qT<-e@JP_x|am>WwCJ?E`GzJs$i%vHMaIeQWx^G;?E zcBYN(F>v@*bLV0uHCs*B$PS^n)}yV}geQgt-dba8jWyIvMr)*s+9f^xSCuF%we*M+ zpSHdS%gt#OofaXXyw>mIWZA#@m-}b#KNB|jYd!R z_u>De(Ww3Z(ZQ3azuDa%?e8Bv-Fy1gli!SXpC0Uf^&2$0uR*T**9eLEZ$@|St39}X zl7AmQpx>oO>Xb_1K)t?3b1cw=7~2Xi3X-SjXil;VM5RzCo@@k~ENtk7;yOxy%VdF) z#B+=2ZWh8Qf~_hf&Ms2dSl`hiEtBU0B?TH7>&MrC;ag)0F*pvkw*E>5m76k&R`;er zp^&`Ew9^@@pm3lQ9?*aF_eD`j0k8)3Qc zWA*;uJ=otrXzl;6_ID4y?Ehcl-!^)yDPZfvhr|dzD_kj(>uYF8r_LWjo3zsmOIeUq ze`&7T-kR^+XEmploT9;h5yQc)0FWy0{Jr(z!|?G|4=0$D!u*E{(GW4*KX`Jm4S#2> zR6;?r|J7Gtef8|g^WCj?0$fUph1RZHL}1Lj-{nlA z@z;n}IeCv#G;%LQGXc#be7lQ2B2gDqzEU_0QV+(l>7ezVdu!rD#$Q8!X3^|h4u)&r zU+W_a&;CwPuy&S|%uIPS9_BQYXaJ|u!2VL4Uritu()ai15gKTO2Ck2RIV%PlU-JtM z!cvvyUHIrx3$?9n1l3n@p+sWFCE+4k5n`S~>XDZ+%Tp7@673Sk9i6w^xflGR|GE0$ zFfW&VvlqQkeGVjV$e?$l=zX#T`y*E?+PKwkGL)qsg*dKnbn!%J!@;uQZ`9A;@>X}5 z7+a@kqY3FW4iQ}ba_t1HguB;$dzS&9fD79OgN+)72=%{MJIJN*_To49OXfeZiRLRk z=UNwK)#&1hNVKU+_gM&{pDPK|*IajHi|;^k_l%s&KXqHGxMzIyjp8P_sOANct(4}e zy{N(yWJTe2h5kNb4YJR~ntAs+EWsNyOgKxgxP4`CWC>agM~odG1ySO8bF=R?w<>Z*mH3sU|w> zfPNel_ycpJt%ip(fA?W41%XMB2@*oo5>`ZJ@D|c-1yu!wrYu`H&5S7#VSQ_*ltW#@jEla)Y!NxL7pPDSdBakfWfv9d{Xq zE7fo!InE^7eWER^1%vv@=;@Qdt*9eyFE>~CrSL4pTTkq!RV^K}_=qHLstg^B;7?kf z0`_Q-40_n!@bT7>HbG$5h0+ZcNYRan$E$0YopkGOL<5KRFe)`u0!@g{hYjw{fsGvI zbV^BKEX!clwp)Y>^w5Ouy?Aq~+Jc94O|nPn*74Mbekma4qEqOlF;@rP{tfpiSRf3@ z8*I%^0WT}7M88+sR#>#(V_par~v&mmi;{Q&2t zt{DVMpeJ7e5;#**zAjy>XBFJs+;!^HC&`n8ulJs&UHz<|Vw_UfcV^m`j!5UQe27%ITEHtp@0!vjwaplJrq8Ls*7*V9?wU^}xY#CGug0_fLj?lxkqZ 
z0fgVI9zxjiJ_cc!a=`73Um%>TSUA5)cUAA!8e>t{b)=KDjNKLfClFSyLs(d z{;{)rg3aAKhCi9;Y&5}wX7(m^L;Q4Wuo?lX<4r95eo$cgF((Lo`nxvk__<*cMC&=> zR9b7CZ)P3pf|f0gszYh@<*t^ZCHvpFEP5OCn4i2eQ zvoXY_Hzwc;ugkawg1sr|3qBbp&LAf`bO|a-1UJ0CY6i&i87n8WBDp^O#HKQGa0#pj z-dZyEK9W)O{3L5z^-O9@Xw_p0T6ZVx*S@6tg#T9F|AsUc{YV$M-26s(&HeAwgWcBs z@7|Nq)4ebEzrV!4Rs7MKa<8wq!YcfpRML!b!a%GVUhZyP&?+BC-V$53q{Vp4#))qr zGzJ_LU0=s3k&tIn8^LP1m~5e_*aZ}`*bqt|HsM9@YN< z+8DB{X`+b_@|nM487^)BBlyY;t|RSiM8<0+rnE zw;D>_?uA3E87D$?E)RPf&6$v*4^vZkz##n4_(7}(HF|Wj4HncR>I@MslNd)26(>Cw zXh17S{I$$irs2h-jT@UAm*f1=qa`b~v=elFJ-m1(h6aQfE^X{OQJ>(N4lj3yzD{2F zT~}bcz8=~*sv)e#-HwW-Y|C_#Lb~beYaQAcEJSy50@}DeKIzR*t4)UxGpVq6OYG3p zd$puarYby|ZnXcy&QPz-H=n8i*8R*@_~6J4*l$6EmcPF_@+;30ZmFG^sx9(h**U)Bh>yVFg93qlr6H#jijYJ+`R8M{%i^7x^*XP0@g=~?pn=10`>o;DqcF_M6l@uVq=(@+I#KkY+s z8Gw7!zHDMutL)g6rMFRbEUf!MF3zE9rYz2@dSfJ8Fz-n&YNr-A(or|#_=gX7fOu== z$*0pGZX#O`Z=n>mpx)0^+Vr~m8o?HzyeV!L%6;NwYDfl%*__L_!_L54W%+X1e{XPv zn%u%SW(3-NdzeGMJ3$V0(KEvp`d{9}II|r5ZC3>n{^`QNCf(g@mYbc||Lz9(E(hL! zDF51*xwrVY`Tc)Z5L`92K6!5B0I(+i!&jq&uiE$jd%I7+-2eX)|IB8?RgNC6asqgS z9#%|lxNjOkUcRS7D&+#m<rRo?ACkpBB@zHv&CSH z;Gt-;j1VYd1iz=47AA8~{I%8?2V*oi7;M>U+%hWE9WEn6pJXYWy4cc`)-Q+Em!5^X z=iS#NtW4g2n}3hvU+n@38?e_Tiuy`8tdVk-z;kr{tRvo_xZ22;SFtrA82ZzI12z2K zTwn~!H*tK4K5^bE5oStBJApm3T;7Pl)b4{ATE@x}S9xf{NYHRXtD%@j&w|n^>gP9+ z$%0N@j-DBjw#a8v-6l)V9Z56%9DSp+hF48dpuKPY>+Ty1HMWxoPuB(;c2h^2O&UEw{9bK>eIs_lY2k=Rj zWl;vtY1g3!0}vPt<}(-!biLsJ8S67c`G0otWc)w1XFys-UME=xe8`udZhVn>ptfL2 z_;r2KZJn1c#>GFyISdM?ojV=r*0R-?Y7iGfY)*;sP~WW_LUBlpgBwC^+6gP_^e96^rEidGoQNq#xq{7WQ(BZ>xPf^?Y(Nkw0M+ z;lp-~%p7L9T_@udmn}5ul1$MMYKH%egwc0m4o}y9C~^K_9q|*Yp?SVeU*rdk6jaWA zHHdEqNm%3Y>D{GPmz6hswO$*CAU7tp*!dG{GuyIg;&);kjF=+2i6^@sTQ zMgHKh^j~Qlu;^{opLgufrkTN=Bn&9tf)R(h+9*Jt(|*qrXjgTV{-N!+yFQ)me;?8K zNMmax&|v?c7_&nD|MAqAD*JyY&zBnDG`H_;cw>&-Nb|-k8h#0$ zig67H$4)XbLboP<$^;o?7qL5G86d3eHh3ngGPg`O;F_dFx-3=gWZ!LM!qpM4SI~_I zC1WLIF$l=ukcL)Pw6Fsrcv{#8&YLT_nIZ))a*`^ZIpalMNeTIh?vSIunxZXQklpUj 
z0H968J48U6PmAYGeIf@e091gV)}^IGl6#RtpTyElK}n&K@O|n)NoUAVPWaAqAtren zG;e={tPP&DEN6%3xk{{Z(9SXkEv$3U+CrCh;`*tu)PclW-=2Pw)KX*RXTM-jluM5m zJDxi%3MY%QgI4?~H!mXA+$jlI`M}Wj5+8osM;d|90`*3IEANq9SQn zv<;nF&ym6ZpZ#^I`CU4VB_u_l9Z>l}m?Rb?u*`1mOuYV!1(74>Yj?I(s z%QcYYj*E(8qCe!&kQiP`kYD!Ioh5RgPKLup>h+=NY8i-@fz5)?b5ZAa z?8n0F4=ZkIXx}7%Qi^y0@{b3eh>b1(*GO8{yu5e|KrnA{I4-K)c*fY zo}KIeDo4=A*qn>b&E1FIzsK0i!l%FO7*SeN{2l`_eTu(e2Q82_2R3wAxGww;_&HAg z2Dk@&ioS%!5Hq2(zHF0v9v=!~=s4chhlJ0!DOr|ba%Uq&RzNIBcy#+0@yH-#1pa@( z!b~t)wc^A6MWg-^Y3K}1&iCfZd= z|7m$wk1$e75vBkuh1EEHWc88A#1;H~e!rLIQ-g?t${VF?NN_^seoU02}y z4ssFU&{-dtOi6zX7#HMGe~N9})SLG|;5(0AU~%1J*Z^_YP57;gkkRlc+46HSnS*so zI~QKh!R+K@a&iQK%_7@sr&x(WzC>GXap1n#d@Oic_{CxyJTS*HScy6Z&iccQ>Dh%~f~L=V2Q%suV)Y+NDi- zkV8b9nvqGm=jyKDz7u6S2c-CxrC%6xyn^Iw^sf0O+jl?f|%8t!pr!b0ZKOKepDoe(D8MAjtJilo<=10(lx zouevxl78^RwdHO-F&J3%WUc4_TN1Nv_HAWCv)nQUs+YvV zZ_$D`P|47?De}9{E3_*db;o3A(f-4;mmo=tCKTDT-zwuywj4yV#WCL3JZ_Ie4EHCG zTjrB@md9<2sPEzOxZMkMJv2ct4$sDWv1+t-Fj{r{w9Ol@+u1!VZg~CVk)J(}mo@>n zym1%rju~6plB;J|zu8va2u}8N4G5&L`&(!&^oji5Uv5KQy z#YO$lV>E&A13GpxN9x>IqCxz$siK!g(3TyUKm{=$XUJ6r!d+kY^E z@LkNnrt?2zqZt1+nT}7C{kMx}8~NX2uTByV-e4|P`+&B1L}Z`r$9uK+lD^-D1t#NF zX?|?w*4m<)e(2fo_@2)CKPKFw{+~=wrX~G9Q|JG8@@zx@%kXYt5m<}Y(DW$s*wFOc zQE_RA3m>{e;qFIw+T}61qt62X8bxYHL5peJw!k+Te?~-`_mF6u21v{H`^R#V)2-AL zo2{>S=o*QgYY!rp??rdLi@bcbNTvb9)n~OS%m7R{qFn!?N_ zT7N_IulAm+T;a_dae*L3^pkgq$amOD85Ul*ZdL`m&D+NvmW@yr2?C=Rje*0JoYs*g zvx){rc_Kqx$SHO|2DXi_fi}`MxY%h!Ypj8_6!l#PB)7(xSP+7|Fn58@?iQN*2R~OwZfp~#oVZSSU z@_Sy*^p$DT^Cg?MS?V`+0FZ9%bgrYkom2S9X}Ej%-{D+cy0(7HjQ4bR7y1qHJV(6b&rGHi`mj-2a)D^1m4; zr^^1@#Zv%Z;W)n|Y^R|=VVxFXcXzU_83FKL??QOQ<{)`{mvg8YgGLmMLIhKHTxOm9 z$s_0}x5y+5%#w3|5QFujg4!;2zid~&xU(+R=T(e~iaoE?D}*2^oDj=5<}0bohk293 z+VMXxU$KE0X3?j~$mNe+_e-1h@7YlQciIJN(Enp&TFn18JyG$WyLr0l|J`rntaW-u z2WuH6tZi@Ixck1$deuq(x~I*W>{8c$pQgiROtFilTPmd`t$raXZgSLZk~9>p@T`|M z7Ditn9npZq*{j$n4TCxQfdTY+5`$((3=M_A=JI-4$strw53wJM` zmg~RJ2GGaC$JsR)q-p&>F^cQ|Og~ZU|4yEQ`UuDQt&cy7QQjXb0Qxu*yu2L{r#IO` 
zct*qGZTc$PsY>7u(-VC{aTXuF#!N~i4?x_D; zZ3i~#|JmudsQ(QW|G%4O%lbb(!Mwx$!v|Rd*2e4Z7J=>WH+Ip^9d;k<+2;2mC5Wqj z*~({puHDG2Z%6&MX3MgTo1CWS`W``qS4&Feu4fW`a+Zwu_Ls!%=>K{@U}OBRQPlsZ zC#Nd@cPG#N=>G&Z?~Mbodg|TZw&L~|hMjLS)@iK$-F9pxnyp==ojKjdp5&%qDg3d; zLttyS?R_{JHyaGOi?F5F@lO1!f}T57Aaj@2cw9H)CpqR;?X_;KVM~7Z4eIHzo8790 z&D`>x*dFdqcNhEbzT$sRPD=S-3>E*in@2L?b{+qliT>P%#Z`^~PG9d6Bo7i++oJl*vF0|EE@4glW50`V}=e7{0~b4yyiGVYAR_ahd#q9yTn|F|7U)un=t zSgM+}q}44zR6Bk(S6NEMC}V0oCMntyBHwmIc?HkSP`8RG-?OKS{fD?|wTt-5KTY`G zDDD4DX4A2<|90_I?EierO-s7v6aVl4QC^|+ZOPP&S!JMG_-1hD5b$a*!3M!w<(^Iq zv-^(M5;dz3tw-^6*8dwgfSdIHbUG>O|B0^RzjpF$r2ii*%IiMXfP$Y`yx%8Bah=Px zMpJimw&plBMrf7Rrj9{bx!RijvU2h+p;zf>cZ{PqU@`|VW11J36##-_)KIK$Z?uS#k%@Wet57?&gJ%@2+E`_RUQJjTa6Y*MF zhbXR=-PJK@sV$2(0aY?yQS8~eSQ3lYq8Y`aRZVDSgeWhxB7W1Z9i`%%cE%;T$uo*y zv}#7F@Y?E${@o&`PCiAd!szPV$1+}9@^-gl-O=4GW4f8YyNT0-JG{Hte?IlDZ#HlM zH~W8!=YOY$q2m8{@>JM=AHt!Q8i2_!58we7^0y_^Ea6oQ--+Jui3?cL*SqooM^!5) z@S#85^?!EV40@tbPj$YtAt423) zd41~W2hP{m>EP|uCBa>b-j%nui1wuWx1IX zuaVV*d+|1Zx~%`9^jU$^Htk>fH2VKe%KN`6{%1E&#rpp-8EUBwkp1-m{D7$(ZfT9q z@+)G6^>2IP6Rha_UHJvq)hplNAv~Mu|873OCjEahIVtM@$yDWk-N|!D{r_No!24YT z3jVs!7`b@UFFt8MlW)|Ba-sxL1Shv_T4lm2h%18j=_ zoRs~4rza;$|L@}2j{e_IKVWwL?t?FI8_USU_yhBraKC+mmBQKDFW8#H4t;}_ZQ73v ze>Gipr_6pE@hwI>c4=?NnAlEy^6jeivsUumQrKh%{vBf{D>-lGdu_$8$NtxcemdEI z9CDPX-%(*aHoyO6Je`#C|DUMyKf8HkGdLXPDAmlqC4$OH;J#ROxi)RnvV*=q%&*E} z-F3E6rS*^<(v8ic4onJ`(b6LxjZIO&X@I0FM-BgNEqk4+=(_UNFyCLAYNMQiw#bH-P*z))+%~ui`T(c^ma9* z&o1sPr|HU2za8_|%YBQoxA%W-smwat_G2ovYXCDT^wB5mA}`=;A718Vo2E#)G#cRj zov_^mYfDL4JJoRashaTKEG=tGhHczt?V9UT?*eVGWEJLc#AZKc@Z*~?QSaa1e5vAp z_t_G0-r8I#v)kTGp@?|_P0ZBpbEW8Cs;S}w&?2sI#6#nVoP_K(XxLPGl&O0+Q}-^bwnVxPYKc}0-3}MJ z@|u_YBNLDu9ZzW=Ts3InE^Q1P%N>eIJfl(c%0qjw)pM7p<^B)xEHp^|*>xAFDgI-8 zIxWWk&Q8?+&rY6#h78B~+jyAwXA>wVeGeb@k#Ej6BR(>kIyVvtMN4fhzA4j;V*i2q z5MEJ^m4ub&R_s!M5B~Nat>z z+Vy`yp$!QfnZiNmLVsXEw!H8lwK`n?)Oh}LR?7c5nU2-@pPf9C^FQ&Zmrnj9?=57* zz~S#wQTTRkCxq;t+Sox1%-?|Fci~^d?;Y;~Xlv0Ky%_$Nc|^-7o48EX@zKab{3G;# 
zz>FVZj@&qYEjl0nGz8u+BPM4a;fQci|C+04SK0!x%FW*ZiWcblXXHE3L~k7jXvhE0 z@Wtr)@$kj-&!0sbpK1|vpgp}EuSAOnpwL7B3XMDxdONjxGIozoG2u4QzF~uJmjV5vPVge3y79?bA#@?1#v?v9dgiW0q3Qhp zS~qS355_IY2xAEp#@ek_;~9c9c9{w7*QA!SfbzM-Vy>$T65?^lu}5NyIrBnhFSq4Y z69S&q;`N$%&AP0Lcdd8DcO-(cZkkX!bMSQ#H)<_whIo_th{!^C8o{*p34#pGk`^71 zu8zs8k9jsVlF;Vv4 zE}l@NMlX)OM%$LDMzz{Vfm@QoY;__tdjjqrI z!yJuV$T*^-m}F#mCL5UnrQtt3f3OI!g%a5k$m!^v2Z+akxfJRVlYxNbe**6b@Ti3- z;9l}0Ut$JATn}ta83zu+r}Wv)*&L_*QM=SoKW51Ng6Ns&`j8@^{pF1@H1wf9&^1YOo|owVYz&QQHTkyu zK-kV$(#h^a>2|@;hVG6U?%>wgiZezFWnOD`!gRLo(^gj-_eG0-Gq23XKAD0?=0S0R zSdAjzON%Eusy8Ou zoq0)-O%Yp$Ej9F9{nWb>)VXiZ_k^G`J;#AQL**KqKV{u?gcr-OQ?>Dl?q6#X5+4%e zlv`BYvULlgxlH!jO1&og9*_PNnwJae1;ncUtb7SE9oiCCe@0427Z>n&JUw{{PNvcS z^x^C0l8%v-nw-7_#`yJ1FxA6iuUeKn(O&}Nbo>&8wd%vyr_Z&@Qoq%F;u_zC1Nt=c z&zeGwGP0(m)T1qW+V-dmg-j8T)YiR#!2*@$d5O%0*U2awy57*EE+z?a6DeDISIyre zNcl3Ht)62=oAzBx{RrtL64IB!xaDV6(^oj05#v{N6frJpo@$u=gsz$gT&B26L@xAw zOctMRd@(-lD4XYDy+{atVs)AW?Mpb^p|#1;HE)=3qTrWAm}*ID2BTvGBL~6;4II}u za3dw*!s`&aeh_gYO*2Xh1xb+p=vm@hOpg#`a%9!b>#)?P@LL-t(f1g)@pZ_#j$usp zok#db{D)}BiT+J(3E_!ox-%zhYRuRNn=IXg<~US9;M>liV&e|Uwi}$+~d&ZJ&&^3oi8CfcM!A?z5TCu@#eGk zG)`9hSz9%IAjmE!-$9d$ej2wZ$a4bw505UK0FTFu53b%*_~FWaB*a<-~qbA7GKt{ z4eemWW`?GZsEG)Ni#8qr5gJXQg@dp)Cry-XrXoeeLKKMw*mk^H&CC$Rh$Zgf#tw?A?3(A7s6e#QC^{p0UFlZfvic93;P!N~PmC&BpZ_5|bZhHivL7v}A^@y#sR^1VidyBt9*5HOssUw@VCF{tnZAqLNntNVg);-FW;Z#w5 zTBDm4pxq9joz`t~C_t+KEl_|~Mjip$X$wHxFiv}cXS+h#IH>x~XW`|J3i=nr|ApLK z7U0eWw(vRQ6hZg1wt5Z*pjcrW?6i4>J3~dHE#RZ5xRpV)ZA{b!EKhd`mbWBaKOo4^ zE`_T_g*y{2(Uyl4t}cMPLAZJixZj!_e`zxk3U({ly)J(bu)8xzY_)&gJBX-Y_rt^P z(p!ms7PWth)o4|W@7k~FUF8)|VVrvzLcAx=1`C+Sv;)Wwujsk46Y%j6Z;(Jo^ObB`_ z(`hR}@AmdTx3~XUt&%(A#6SMCT7h?e1tJQr6ZU&3|3R-IUUN_P8{3(I8#pk8u?pMm zDYg?zPQf)t-?1Hh=mvj+_@8bF_qtI`Nph6n+lzRvf+HZRucmUpmpo#V?A zqR`lWpfR;0l+d-tEh@8%lv3fAERn)3Me@gjTUhv44>8L>f>sM?3(!*R0R>p@to>C~ zU2V3t4F`93cXxsZ_u%dh!QFzpySux)ySuxD-~@NqZzWIFF4Q77f~3OkmRJ z*XW~CSVx2Evihl`fdC^=3)NmdSruRaCIc2p8Vb-u#IINv 
zeUd>d1?E62MD02H(qq<2R1eg0?s6NX_(zSrIb<=SKJha~iuhq=kPci)Bvueok zHj?tu8Tk5>SX>?U=j4&VZsIZGGgGNLK4?}KQJ$eBL`k2Bp+{+@La?K6#(J#q7=wu- zyCZjaBTVWEsBs5+Lxnf0^txD|31Yi7q|`s;HhK2_cnih)E|_X5iCMk zEMx6cp1c_pRIhij=!OXSIY2dV!pFxVDd>>n_HK$wH{%pJPpKWtEX{xyx}Su{ypTvv-Q7BE~#BA*HQ&dMnBLlA`Je2~vPk=xT!2bxi5ONzQ; z$Ov*8Ws9=;u>mDDYaUkn*^SuS3YCv?ht^Z`{W+yRL&Lb*!th8lKPSYC#+li9*3}q& zqjvDRXmU?FzS)I4u{)_$=S=C@IS-?|O~LQ|G9X&GJ?AId_)%&j6Y6o;DT421f%(Bo z7O|nF#{FqyB)v{{3Ze|03vgukk&8j+6#7$Knwa6PaEA`w#!q;I!Stt_uXjR3Ovxee zTG4vv@>1yeEBJzzsO9go8vD`oy0rooybuZU4v35_9llkHiosPU&z4+hAB#>rAG$(( zJgmmD5v9DDSrbF`^P?!99?WUK90jtsUzU|$Te7%wVVn|>D zo>92t;cn^=o5_ChrbmmS>cC;7F!+c25ShCLkn{x?{fb%&+UU6vYQo3U2FEb1dDjcMhNgZ z8Q(RTanBGuFk4XsNLUV-<+NP~jTCxM@1DI^f`{}i+2BEX)(JT1P5LY{=XsBo;Mi)2 zVt^?9fDvjpC16=gi;|e6sSbW8rmp}uMs|=>{~pBeC9Y{htQ44mz1jD#+;ihs@%INV7CH-Lf#nKt!1!-;q7_ zg0;la1+mHzNfZ*~rl$w`c9XxZ+0*GQuxw^%WFp~L{RzeH+dRt-b%iV&MjIdgeKDO{ zm;q97jaD(ULk^aPhJMFFz(+tAgB!a#Z&};tbBP)axSiv^cT-INL!o|yCwg&Vp*GsL zKNfhH;s@I&|eHE zwqy!DS!xwQfk?d3WU>U-P<+3TH5pM`B}^z}A>DQVvKOQy*%ldjJ$ca2V?zFZ z3G?`k&Rlc#vn+d$W+hQ@4Tu*~N^-2AX>DyM-}?R{FckH7l-_gp`Q2}f0gvu=Z|h+S zDe`dka6W5N-bTykG$W|&n`w&ajB&$`@x}$s8h=H%wH)}(rT#tm0;=t);s=d9mnt@ zrW0N-*K}Z2mRN3$4!?`YX*smE@+F(tD%#Jk{!`?RkKi?Ir4p6gOZy?mAs76+`ctMFbf(=v-d$g4j-Ddm)#R)BdWs;me$tT(o&nN@&2MWf zFHl!B(Pf>T(B#3;vbr=b8Um$TrGCY!CheCCCGDC;43A1b6o9G+_fdYaIU}~~6y-rvAxq3$=>l!@`OGRHIxf%VQdrZ5qd{37lW7RcO2rCJ7V z)r#7aY9K=|7Yh7o%7hosDr9lXW|~C+tvjh8r=Mv0n{Xf(e=Ja8i#VGCDbs*P=7B*k z`lt*Nt5<#|P$EunG#Shh8h&EMFG!O)CB^2wt56Xng8ULHq>7+G3@EQYt?N;CCBR{z zuEq2a{qc=8V(1vS#p&1_L4tQNWqlSUx;w!}^dT!$b}wZOe3g~g8JRMB1Z^_GRVAq# zH2#Nh<~Dkc&-3W8J@K(5S$}<2R8ngkY>LD}#2H1~qFE@O1#FWs^VnCCV6iFYg_2~g zk1{b^@I?~_63WgaA5lGcJ#N$SRb55<1Or8R==B9y9Q&#YTy(Z*xdr2+NV)E|)|JXb z7HsJ1GLu41&H9Hq1v6X;R@?2J@cWwWHyhak8@Q*FQ2OGeTwE(!PYYC~1)IUH0eY_K zsp|GdwVw^C9cI_)(Z$!x%!L)15}Pu&JF2SCV+2{YYi69`B!{D9OX+Ep>_5!Z0Fd-D)Y@RfE`PoQ_XsoV2s0i+8%)s(yV~ z92VEl$5RX`Wy{DK@2(#0h4N-i$=p%!U%Yc}Wc%2OXCHpADu1-&aTQ#}>g0IFk3|e2 
zgbC0<<)-;fHo^a$Z0@%JWRr!D7by8l@2x+C7(yTj{CNhmHAWAIDNACVFtO?T$Pg$D z9ZA9I-|XK2Bi~QtAe+AA&nPakK4NsjKo4;QPspG33?YDU6o-IBW`LW1ar{IhhuPAz z=j4;khpE;)nuT+A9QbAc&r(YP6eRp;`$T1s`>C8C~#sMO7G6Qs0+2;5AY{ z0nBIA?YL?YO~e5*>$H*aSE{=#lblX|3qY@F!GZKkL0#rn1%pu}Ep$+kXlHqyRng9u zd+M(IAmNojCs67R!|whs=EE|9-%lM}#M0*>q_dy5w((AOA&Ll4>-ll2Sdz91Lj)jo z13%6N)dQTN{9y)nef*kmzD9F#Y@PNqP}4ZD=0YRH@I>=+^JkKmknlZ10=e!(gY82% ziU>IMHbH|?p%%!=y;7nk)WzlJ+!38R4ErY$MVFitL7OM`H_K`G=2#O_x)(hM%egIt z9s}q03D$|}69opEk09W~B>udo(Rc=2=vb*SW7KzNGPExpEdknwx+B#U^{142;#CpN z`Pr0J2OjiVMMqpmUV&iRS*-Cwv-z-euGDJ2Tv32V?u7BRzgVg{AoB=mT81bGRw&31 z4fG8^&__Wf5i&$u-~-oPK8`H*wT1b?4xx7DkH&w<=^f(t$~pg{JHD9ZTR6}J2pI8d z{u~xNA`Qoa_=XsVlG}Qas~SNU=#V`ULsbQAR9!yD#R z>qkU;N@72qV-X(S_!h`B1k>wGY-Wnq0wf6nxgLO=%A{N9`f|D2rFx0Lf>Ck92*83B zRM2cG9i9$*bBUa8t=RQSl=*0Al{UBn0~OApVZMc+p_>B)82z`d!)uk?FTSS(emB5V zieMZ7UEfmj`Fum!lUhd&A_LI%mq@U4eIS{}H|Ob)8;XH?O`EMpl)!-26bGZynrQ&V zWY>uWda@zX%&Y=_s+m%@t)`G*9V%`mfzr#EKA}{x?c9V!T!e4;oAB`PrEkqIVnVAR zwihKBVNJk-V1i%)M$y26a0AegtO^fEkbwJT@o+{#isRh)G(i8;vA+tiAnv>Z)A5j_d&pmDJ~UIfzc_1Af)Z4p!tU_+RDAE(J-Qwa>|k=;wx1vlGKbNM zAbEihOx=DUMCo9V^_DFz2zZUg5kR0hVSN|Pao@kv)|8p!kQbq-9ahe7p^FEqgrAg} zx0_0WR13G^sBLIBh~!U`7#?vTG)?8FpLY0MK1m`r7noC+;(R7#i=&xQDv{0ieK(nV z94k#Q{VC&#oMdi{H$7JZLK6ZU#492l!!T}+Jq(7O|W32sj7Yg14Bi*qV2qw{l|z1bJ5t1(4@ym|3lGfL;V~A~%_yM}Vz_~JD(*?i#w05Jd(+BxTfvWvr z2jg4BdLe0mu)t;RQENUc7mkb9$Z~$Ztvp_ZH>Ug^z@O(Qz$U>!wj>!+48yt`z+*vC(B= zT?NaoqgHEv#%?US*j@MuvVx3A4iVEmK`GIz@V;TVfvH(jsLAns3=&2)k@$Nv@%yiq z9|Ku1jNTx$zvB^lF5sRnzIYQ%2K{oy1H%0mb`sg2$^nx70Q>(e*>?EEmwf^%N1m^d z7YaPrR#Gh*cmf2&uLoPi!0qguPL2zCA*c?%nYa?h%*u%Ma98ruyz4O2L<7MFBJis!*2SDpXA5q!~K-Pzr z`Eg9;&tedxVUlsu2{wKLpTEJh$>5Y^{Ih0n12o$#WcclWX?CUXAI*LshY?ry9`c0x zyJq|UU9&AmFp2=0T>;SSgDTZUspO@W8x2f|-0hQj@ja{=X&Yo6e<#43e(LV4T?r0%Z3;Kw%AU{Utm9ikd zP9g>#&BpUVmJBa9dt z;|>nv4ObF`<3=(c!naS1a8L{-Fq)bYjaT83 z@`lnQq0T%!8u!#7Fp3(QhuQJgKL#u?5|ypY4V7(7p2l)Ps;Wl5mI@kY^ebDRsA6Cw z;FW9&D}Yy8Ce)3Ijs}*H3$pxan!J%j|*F9^gZkWoPT4$VN-QE;gs6zh2+7nqFCs#Dh9FSkt=n2f3uMi7{+pn^wo 
zj;di!d;2&O7uKVw_En!OFws2xR5O^DLxs9?quN5bOyR0bVQyWWz0O~!k@xl2WYO{y&r{a;Mj`!jth}p!vCl((f zKknj~d}>%XyZBb)+b;QibQ>V)^$L36&Ie2XTs0o%f->t*m9D7blkzk%Yf@GG$Nhc_ zMeTgkysvpPr5RskXU$MJJPUiw!1mw$6&oQm$hk>v+cu=->^84eXc=;4`{CagEOY(T z{Bv2IUzWSy@2Y#F`QU$cf_T5I|8V+Mfo6S6^eZHe{*K)E6EKq(J9D(Q6YkGdgoEj&}FTP@WU<)`q<-XMxUUe|~5Fik;8C zHc}7IR`cmm%qWA>4@WLs^?X%|OCz==S>4nmPd7KWXKVpe{_!*!DU?)j>L1z3Tv+cH z7~&8ocMsCWbnkrIvY`%Sc9kK?W=HI@)6>#>9KS@x>@bpfeLq~+CH6F*k>iRZV@T&i zK+i??GsjwyiC?L~klWL~f1J!p4*Q#5FUHu|U28iXo%g{8RLh*fY`9rzf;BZxKPA@?t`O5$&T3_S%(=P*>VrLc%%}=m$U$DMRbx)GM zOMUHVsLL3j07L?3RO22b4mYWClU2yk+%6-LUnH(|Lf@L-P+m@3qqU*EQOCFZf42$({_qpE+mv6oiYPEyMmetIIFa zB6AhZ5bejv4^AQ-q9PBsl2q8QZX}9bM;K=9^tm(ym=nZ)b|~GqodJt|FvW=mE0au< z2B6mIN#CNJn{uqN4BSmB;|Qc4zs=h5qogJ4@ids1#OGQ!ey%&oJr}`z1A-DLpY#hx z^gGicE(_wsK2SNo8fIGFrG=O1f(|?<)vbJ;c_|WI``K6s&WXLJ!mC;+oZx6hs$H3p zer&tR2NK;sY~K(;L2bREdOsPjw%LTn6UKs_1!)rl}6)7RA_*J7N zJ-QbRcvzJLA!SBu33^PRS2Z!cNX$ox)b^ajOTnihN&{#v6J$>OjE05UWKNM~4OG-3 zy=3OlD_=e_yGY7K+*E15c$8WG$;=H=zVdp$TG8B}6bQBd|@9}4B4FjH$wYkz@vD0yJho&*V= zC73~c_<|+P{S3@S(@95H|M6tn#FN@?lr9gvpmsorJXK<6OPYDi8ajJxKto^aClwTj9_^v@&FPJWO6!rWh}|^cfw&K`MJq7f2`zVf+y6` z48C$sh7o|T#rbz%E1P$&2F$*ddo`fb)ggB&J!$%RGk=2Z^TV#y!cU-X5C z|5ntpkvP5{~P2Xj>VKT$<{QmGYbdHrF_<*w$*c|ko8Oscyfv3xv zh&;JoD%+1i%Q2B6@0G1lC$^hSYjqvvNK@$DEpNQ^Td8D2?QNyJ zoP0Oh3v&A*_m}STB?H%aJ3f_3^`^+5PcBN4 zKt#7D9Nv6LE*+5Nk-KP)=;#nlE?ABFRX?vY({8W~f#XgW%^wX~GW(;v2z$fn0**KR zN>??qSff!NJRt=&L-zXX)*Vt zjW=Cp(A!CS`eT{V7{&8`Amw=eXw;JXOD6a1@MlB&YtX~?L!W>4xcXXI-0W_7h>2u` z&>;a)w+e%lY!N;N&WBYGmb^>zmL3l*El^nbc@kVCUKXD34QxpeSp@d?hm;L8;3ov4 zsGN|@EHlAFBY~jnL~4>o)-hv$b?>0j|JJ<&G~ep@KgIiA@T0szbx`!=qv!d!k4Afh zV%608`$SUh`VgA}tvldeK*xjGGPY(Akm;g$eyx2f`qc*1`)b@E9Q6eN-S%udsJXE) zOjv^q&;;y=sj#Ad>)-WK&uPdC)yWd$Qh>+(hX!7*TV!a^=57LSt%GDnoFzPA70mi4 zWEkAfdtL%83)BBC@8~*vDhjH@F+$xL-F>B9_O&Y#O0}Hia5rTW*MljFo~ia=Yl4JL z=N0me1j7!~&-V#Q4I&M)zWcv;T0JLEqB^J&~a z1M}0=fq_$m*!%wiX1bw2zzjhVF0LFs)cHTa-2ZoAc2uI1`S_M=>WL=|r<0DJDpPq? 
z(@>Kb6G%egKFe812f2gXt&(79gLjjDtM*R3p{&~M( zl=|h`a}#|bfptH}k5j4b8(LOw-)HRKz*Tk`!7RW0;$3#zG1l@!_jCNQm+C6nU(e3B)lOWc2}l@FjkBSuHWKqC zh^m-9-AnW>`phCiac~5daV*D|DQyzJOfkaSi75*myWM57!5VM$T>RMkPyrV&`Fuud z-d&U>K|{4&JSTzgqVW~IWq4L!I80dS!+(gT6Es_BA8F2GDGPhnJKk9MgA2dQdbjB{DCB z0$%-h%VQy=OzAE5_j0x>xtKRdda$*}eh7IKezL%81g=0xoYl!p=M1641EuKUfw zM#{xS;{m~->WigEwDIp5Y1V2ol3)a8hZfaQs<)S0jselmw718njwG(tdtJdM#RN*{ zi@t5x*E(OjxOlZyzsbKZLLGwxl-CPER*o-ZNj13S_dR+zMs(6nRPcd`@_v7;T@Vh~(5dN+{f9^8I5B2d5^YI|@SeU2``2lbiX=bZQFA@(wq(i$Jm=oDW z_ao(94V8YTXn0ZEOyrN_)K7|AndLM|z|ZGR_>u;PikPBo%R?-_#1j}ZIEf~cGIDY% zBdgOpAY$nzu(_dr)qO6DG*T$rtJONtnQJ*!z^777x;-x}Tm+xNmk8%5F4_C`>w3<7 z+6ZAkP+LGZbbM;CaFN%a%U{|?Nu$=>JW-`WthmA5y+kc@t}0)pxPjm};XWvJFp89* zlF1=d>yajp_8HYM?Jagw7)>)q$C!G7Ln|RM0Nu}70#VFzYsVsk(>nQo^T8HtJH>h#qfR5m2xG zrSIuF{Ex3>A({$cV@*sT2KjBk+JVn3*oWTak<}Rzp$BIR6#AvcOINg!Ts3}_dWVV& zpikEoMRr;_`dEj&cM5z1Y~;Em^>l!>Pp9#2+A3Jh;c21kDWR?G7XB`Yje|9a!G37Q zVp(vJE~+5;DPYq7aL>ehZ{}ThaZvSC`8Row;z~a9#z-`$- zGXSw&Xi`5q)Y_9fG(`_&hHDjpEvT0aFlR61Yr-Ln?Q3tnnu>*~?}$}FCzmfpeXZB? 
zSi|W&;B6UF5JY3i(I9PvrS4evCC)ErkmPH?WzE&$jjB z9CF<`9kx|X7b#xpYA@e>unwW@8vQE_%+IghLd|@i1p?ZQ;hnwEQoY)ByAE(Lz1=x< z7e)-kRVUQa?x~++CETzeE@Y?5Ja_Qc#36U0s(ZO zYnZ;_%7N19xkv44>(_mPCr`e~-?PYLpkmv+)nX@1*^EdUv4c23R+tjaAZD>A0t`He zZ0~{-b1wYDJ##z$?VbsD4gJLZPwrX$&|mJE&m?j&sQ`M&Ngo?vXWuYvt1cp(^5_2= z4AYzFMPOS?~-&HX=d6scLMdqy-D|H7@8#`U&76B9T*BH$o1YMh8I%w@&a92 zD(?9inFy=+T$5GOLV2Vp)dJUGv$BO#UY(*szL2Cd9V2uQli^I zjyq9*hDggTL=%Onex$hdoNXNKlV41>>^&K*0%hUG_p>C-v;GDS|VRkmLqfspFOi zR~4MBeagn9OnG%&6b>lppga+5+FUqtRjH;eM%>BnkJG~2s2%#M&s$6hFA}#NE~COX zPTZY(MF9E~prdUouez^s8;_?-_K>*u7UNz&E`FZ8RjxIT>_xqNoj=~#u}GO8MU~Ap zfFCL)cVzSAA@@WaS0P8$OELwwctJXqyO)Z8mc{)UwN|MuEZ00*4c9Gh(Y3o_J}E&i zxnf4*ay$^<>e`eHc3&`>DE-!O(NE%(2Ifq9?szgrnt^yxIc`S6Jdspsu(7=5eu(&G zDVMscQ)+>=%CjFf_X(J~%CmZB4QJY3)*Mnv@5KwEsl5IN0Ft0BFZ$Kbt$uXRpA|Hn zvPJrHm%{y}oT@)|UC%1Hyqg@}qL7oJ?KolFZ)(7d<6pifbSRCC14NUrp#`@jnXb!G zpfDf?k$O4xbhY+G>W9y&m~6Ct=At~!@~Y5Ab$*>7Zq`G^s4JWDR6X?A+xM)}3Piw_ z?SH>lCx07-1_bR2!3d7F{k(AVk8BJ=APLUY4wbD#0C!>36Tv}>D{C(Ht{V9u4}LfWVcKQ()X1plk(d%f7Ak-fe!yl&EL zC#$Fsy%JRCR15#MWb!`P$)4xNlyoZm<6ROl%Db2W5AhtO9coYHXT%IDR7G48WrQjL z8^J2&r>fDMFUqvOIOADkzOhM1n!pt=%0dV+Gt?C066OT*_hpr4wG-G>e6F`4Un4S8?khO8_eI=jdw3ElKGOA`-zMSL~!70GAA2dcR zE_x}YRha%2O&;AdjA`E`8F*Rv>811f&TKNbH{F!cVcfed+h(2@(J z4xr)(wm|oXCi$!47hd{Z@wWzoD>kb9Rq+ckYFGas75@PE|3k(93i978euYm3|GVNx z3W%5}+;u&s9~`)!3N^`fn1_MP?gq7!EaP8gGv>EcQq3us%N7NBn=ZIYRHT~i$b2TH zU7X;`DwKy%K?H;K3`--_&01uMf?=Tr*(laI9$OjOvoLDfKT1ybR2ZJ5-ffltEI&EG zlNIW<-|Up;&uZz+`%<=1M6=DV{MjDoGxLKnoV=>KV?dxSs%M<=1LjoV{Cv&1*Cua{A@W#}XgOCt+y zBK?}4Dp!(MM1HLveQ!n|PiKo~m;SkPrSz^*#ND8Hv>vMSdMqLE)Q{qt)PTDt`-7MDZhqB~&DGQIbUyrw zRC7=AI5foLqdb3oT_D0BtHfj)A3C&E*m27Z)FIlS4s-AvZ$OmE7_YMS`0yGHhg5*t z(suv1tGaXv-X`-6udV8&{z&Onb@}eqJF*tA1csZb=-JVY8E0CCLiYAbiU5%P?Fxs7 z#0DF$-2EGeZEICEZ!S(ADa6&h73RFwIjHx4h z*oLm1CS}8ztC_fz9;#S-+utfrmPg^Wu;c#4h56Y`k`4AVsTaJ?r|1eB7VjhXC5M_E zovrLIBL~Byb05g0-l3GuzxW4OLTseG4sm_cP+`5r->14TSOfWjKHe%SdE+Tj*2kTMYYbAAx`(s1;Av@VkP;~&0pc1W9&E_vA=9GfXje~!51!>X;qV9* 
z?D}_H6FD`{UWQIihVp=Kx2CPFmU6s+j~%`7FIVEGl3*3?#a(;WIvytox%!;i98ihQ zJX}`g1*5n*nTf!;RuAZCD=DddlX+aE6Zy-P_^Hs~?pa+$`RBRiG-1bOQX(HhXGG-* z?{<DFlE@??Qfkq3~X=MPshc`QdF;RcG=W z3{%Tbv3?Ut*58N(DMSaa{zM-*@-HHRkNg{vnEoab|HZ$EL^S^okw{_xjY#JH5Q*7e zM8X`|My<>YmnbDfHZM2IV*XTWgdNS18Wc9{w&GF&!ezAhQGj)Yx+46zILiMu79Y~; z;@X%pu=88s1|46J0ut-BdkS^l1r>9`$kOdE{V>amI-?ThGa?n%LPGpkcd@X@JEg2 z6{N62e5P0Cy?%=2BvEiE2+p@)vf`DS7V!x6YqKg8??fNj?;|zIqpMXTpiU4A=S;1@ z*4RD5o4gT%s!)fej=(&7_%w@Co4U3@mAj9QEw!j2kZ ztbd`%3Y?N=AoO@g9-wpeiU#Cu%4lYk|IfV5^gr@8cf6K{*R0Iz*SF)7yrtFO678Bw zQcH|?@TUg`2Ad;8R|YJ`Z!V{zrWe=xON~p@3f;LS2QMx4gO;vVn^)Bv>1wIZeM(B( z154B0>+d-#>!#po14_0IYVs>V{&E<&CH~RN? z$a+5chs7ZH4zL((Ryk&YW1&U;hb*CGtvs1st-8G;bvoIQxS1G=iVyF=(y;!dX)g>K zvYY>y3t6EQ$5ec%$PS^0J$H}?_=84FVVy+)b3t{+^bD99cTt>$N0Xon6OhpvUH+GT z1EwsT;p;#58z|bCu=s$U>A48?aOWtVGf$|!v}Chv-TrA>2nO6`iDP1#xbMGbYIQb! ze+4_aIHC&P_C5zAiNO28!rkUF*%S4$8E_|d2~c}p#6&`&L#v3d&VE5aj0X8VI|mpS zB3?U?-QoelZ$t$9A?dXzIpGtD2l}6uDm&x&m;7UkuXy@r z4Z)l1!$C+`l~@TkY`|WHVZt3er2k|wxc<#z;IIU=8x{oqwcXJ5-`fq|5s)3WG0JWP z-n;Ee#U3~)RVsjeuT0&Xj|RBf1_qP8sbZELk4VpA^gE0!7r(?(X@Ubj+Wt$sLGxo4 zUC|6T*m;YiPYXge8>G@M&2)=qi-nLpMS386d?G9qniIOoqi;yHu#FEyP?BPpZd3<1Dna!>j zSotDh75!;+5Kt&D3vJScpRi0CMg<_C^W%4@w%RIG>S4)}ps6M*CNTdpcBiSli7-Uc z*)m_M`eZj5tAM)a8HfudgTu|M$l|{18nZ;iSyJkMY@?TM0$shOmzjW2hZ8#GG zqLk$m4AoQ!*_v$Hsf9|`C*Jmdeg zxwI9mR_Iucd5}1DcWLYuDsVvv)ZI9m<^M_3d$GL%-oE^BEExUUoOt{GhVmRda~~%} zlo*KLyu#O0i7!2DPaPFz`CuM>3v1xf9qd9iM>NDRHH>Fs+D~<)Rr{8f+pe7y*9yg} zOrSBD=i#r+F)9l;vSh@@`z(pBlH+w;9CoWw;zqVWut8k7`<;i*Nb@yBj zho1jg&Uglb{LmTQ@gf2N-Ioh)IW`K={e_-N+^?Zvtoqjc5Vw%)tC+Re=F zDFk`cA7#`^hdvLzKu9ZQ;RoMB>!$gOOs0l%Gqkcoxn|%3ma5ZG<6n}$s{xSwA*X(t zz-zDttmRIw-L;eya|NLs_}23WBGtr{GQh11)_TcEk9gw1Ncltj)_6k z{l%QYC z?=SDhO~za(GYUyk79;?)1D@PC4U71tjSu%!IYAYi2KK%Vr^AOP3$UqIkW_1{1M zNoV2z6$BQ_VYt;{0trt^;kBb#D6SpcVQS!lb|hDA|Lliiwj@HC%LQA#?(<(T5MsCG zh?0l;?=!s|uj$a0=*xs+=``7gu2N6P;*{44i8Km8}!$DiEi<=BfHKDuZ_SQTuQ{X|=9KVEcYw++#mqEJGlfN8dG4ziNW~WzxCH 
ztUj{+<$mLKQIM20ms|iH)U!1I?eqe%BP5SMrjAU#4#9-WuGAmK7i8(?h2plNts z+|L$*F8z0yX!ymJKO_h#p)^9ZH_}$;D@Ryh)0$VDj+6D%id+{?M!luyGd`LN|6Z9n8WQRo(Q@Q^0~AFib!s0|txnT=TBhpN%hg^aBh9EK%uf z4lPzEb%#GbBlRK3XwpShryEj}Z!^L|Hgl6hqDYTBXns!larsI~5Zz3iUc~g0I~Xct z%a3hbzhTdmPl6mG)sLMTlo^loO-bfSVf=oIXMo$c0?OFXkJ3V%VmEu{JFM#ua$G2e zH+d;jAJGLOQ(;#^M2Zzr62NG&#n{kD?&-B7!&};;WY}Ue_c9z7T5~0o-H#yFUM*0% zUMREHlsc8`yh}ScS2y+CN8Cu}UB#&$VYV3d5(qScUjeogWBsfb{|}nJ{JT#Mei7YW*6i!X^Oh+=d0f@# zdmnSwP44?UB0bWW_*?@zF4-Qtu530PlAy)&Jg4)d-HMKjKidl?ctuR`ab~X|PA^)8 zw;=g4&iAWm<9xd7VvNZN5>Y6UsXS#;F^P7|bFP&2a^6671~PsK^0A5K?ut|T=yZiy za;AfVKl_0qqy|Q{UCSPOL5q>N`9}mHB?n>M1nhD9&v&+eA??tuIa%1Ivwyu%Cz1E2)F{@_?C0LiT6pxtVsQX^+(0^dObXiV0`chhS7o5#Fu8D_* zujS{>?jl6SO|lCV!Psv<(S!$Zc=hbPfo_N;@uxG}u^CYm zJBkzfz54ME_B4E!3hd<0fXE~Cuo?M7NU%Kqk?@i|`abr3cI%_DE*B#%!unaNk; z_Q~{IgGCI^=I-bThafGyy5z^ZrBw2U)jgyt`H1wigoX`~nKebKii?w7o`bgiV?~?r zR}p&B$ulpv2zoQVYF_!9ei$edZs;i~dm6zk-o9`C@4*@>rWOcTkF-E2^^RR}g zvBrN7u1)fMcjjTOb7|oWA^Ob`2XZGbtY0uo$&prT3HYjN9X$(*s2Z)$!fgKfN$pAU zE5WBLmSj`Ifq4mfD6kzW2802{`%hP(1M^aDJP!$Do{>oeU4kzOh5{;n`@|?WvekSz z3(g_no*yqWZ+DCEgl)Irvf~JV({}3FEbfHuGCO2JJV|jgU2)I1X(53d6nSuhoS*t# zfFdf8w*ARkuL7dqp@`F_ba zBf)K#k$n!@XcRVSX-{(D7Q(92r2e)Ksw~Tf&}E@anmS`XTJ+eiqs+WcIE6#V!GHM_ zkrcG`R{W_~+r$5!04Ml6F)sadntA%cTrd_7V(dv9m!vZIXzYG4B)_YtoY(!8$!&cO z{2NIV6j0(c6fm_aPqhD7Gbnr0Bp1qpb2cL=T^QSrWn&F15yk?Uj+&uD5{7ce;L?7I zJ5f24?-13TBDy)0DuT3sbg;|E3^)JT?iA259zJ3HDqq#k{;pBx`4y1)_h*O_6VW4? 
zXV!U?$e4doL8ycf{DSeq`)xOT#d1b}Bx6rPrT3N} zq1BXRo5&SiBJwLE4@_a_fe2Y8VKjRUy>9Om_Ng_-W`?0JUy-=l7jO*U_I)N%T7ibE zVEMA}N4+%Y{hLnyh?4V#xQ>-(d=x4qH`C*)A{Gys<2{5PkWy= ziZ{~@i)G(nb0u=ZI!}23qpmsf|s&l z14Xs4M`zF~=a(2OZIlMut>D{%-ht6Pa2pt#-oP{qJlouZ!K_ReWsELJ-*E+G;AF}D z9t#a>7@c{rBi6H&kBJ=;=Q;Opyoe9P*lKGq#!2h=xM;JK=+|@<&yv3QfWs1PS%|sS zEU-*g(h%JXMQFWYrbr009&h9^O1y?raaeJ+ z2G|Ol50NxK!JiNdj{-`7`N`x>fO(h;2r`aJ8d886ku*Npk1#b3!p}&8*Rt8)NZ}`U z>lN>l2?_VJamwwa?GcXNe7>O0;70@7EYH`4IhmNpFStz&#^y%?KM;iJ1@@4I>Ax`} zwl*ZjN>Xs~!^BEL5+^&_Bep70yMK%nGV3eZ$S2PiW}JlHgI?11TJ892u!qMhN4S*) z*_c;ZpAgz+9Z@ABnPyt;*O=LIhOm%<^Ov`q6RvjEzFiidGrDGM{eP6bV{{~sx-Z-@ zCbsQ~ZQFJ-F($S-nb?|`6Ki7Iwr$(KJ^#JWy8GOF&%4(9p;xWy^rxyy_m9t0AgrM0 zL<4R(*-#sQxw(N=DD0QXKOkYm?|^y>Adzs)tZ$ zyb>Q0xe9gLxe_xyE0yj_^)6hzEfgH1Bs3P8L9B8&Wy9ln$rSNTY{bw&f}Fj-_awg{ zdvhL!$Il02R*pObn7wh&AO0V?kF_y@Lp46{o0E{u$e$O$t#7{Au>7zcYmtErevzQ- z?~&3-oO1Yk&DvXEW$*P*=QVcKPI&#+7M4J!a05QiH!zAcUTmh=6&vD3rA>7_Ih4`q z9um2d72+m(qsfwp?CDCO=x01gBEQsGe*{E0C&nYmK*%obs0;^Lb|M!uf~#UF8kP_# z8p1XY7WFt@e~ApWius1XSq$8h!?2tRLYfB zGGhSkGW`Hg^JbBhUU`38i`l0D$&mE{H*GM2EBq~N`B$OkZ?q09znk zUnMs%xAe$p)Ervi4ce5My4ucfd(q?| zj=x4g_UMz=J%`@!Y!`+hm`4RPe6Z!1KD$FvZ!#+LbEhA{SMl9Ck+eH^Dpx=P$&V>Q zHLC_oy)ij3Ma#Z|2gob&;TEa{O8q7UylY4m38oJqu4U&Y{&;0^o&#IUj`Lga^Ap!` zbLZ2|QOZK1pYs30dZTO~AU+I+X_4r?gxVo!Y~#Ev4$V?$zuULc0Y#rq#c$PYZ8Xk1 z_9GOf$r_YCeF?Z_TtZ|c`EB=@%B=k*o}NUBuvzt(s*0o`6i zBS>F?G~mP=b3`g!4=5_?wlRuHOUp1r)rta7a~amdexKKvx{iXAtchqDHj%_q?o$)8 zej9ZXG{`-Y%D|O|<>y@W1u+>!#Bm8a7G`D1Ke@TYAmdZFeQVbbqeN-Pa5G(@+Wnzv zUMx=0vbU~?E|}P{*w4Fq94lgWRwL_sa?M7`{PpyvOG?DMzh5D#?NxiBcNOa|aBWnc zTgrUbUDfOmlk8Sc7WGu-i%J(F8dcT)fuEe!`Kf!VHJXxes zp668WI8W=ZqE|~4;2rM5c5?L&=l#e^y(s?a8=B*)Pn#GGX&mI`v&O?7V||O2-acXY zY47elS1TVa(QEr_%R9adPATPP-}&;QG7gJ_VM~f2I+cyMZQC;vHb#Zrmi#IdyrM72 zdR%renXvvl5j3hs7HSI_G#D?%xXu}OfoV#iCBePt&9_$EsNaU{t8|zw+#~@* zs+DmiYh2h$;`aEpq^e(%4_k=ycN*A;qgVe>q(tP9a}v{Gsw_zBqw!Y~ zld9^VTI*f-%nC9VT0WCftvevDKuKZz;BWqRKoT7m_T?rpdaN- z9b;x}FW+%zkGWuy8kN+<8=bWS7OpNk<(w&?ZYpM6>4FcNtG65|vmnfnl} 
z^IVdj@}mkT&NQwGYh;uASPCUs0cOU<`a1FUd!Swsg27~g=r<%&c0;;Tcjk-t=*JC`0!Ukha=h(oM?b-* z8Aw6SHE`d(8j2WOj|JmGDa`VawO2sbKnbxyb5sfXwkO~WIdE{saiPbsQu9aQH1&K| zyO+HJ=UU~Oz5*wZ1^An4k9Vgjp$N&lfV5VFul*Q}@dIAFApVb#l|I>Wj#@5tOoxDs zcm{ZGL}f{ed+w3~*Dx4%?KP|vrN2lY5j!Nvk%vhK~1>i2&pq+Fq$fVr)9^L5V@Hg>xo^|NGBU@a;}}dyigaj zwgmaQGGk8ny29Rb&l?&Uux|6G)dtG7ar}!2TpvqBz>yGKkT{YN(a0VVWS%O95mESz zm7wV8YsxMdFO;F6^%cV*Da=mr@`PfbS8z*TcHKEKefr{1rlK@iKEXw9(j?DWKu=g} zK_Jp&jwd_bV;8Ub*T`cjuVW3Y2f#Uu5+b=H(mg;4aTWDGr*{kKUNH+PkAUcem;nO# zCqz*`;Lnq-1fMmSr80rmAFgN^5?I(tZc^9<*?iv6L;qM%XzHba*g!t9vaWz;F-uOy zMj}|KgPwrq7-s`pu_Y?J{B+oErj{~Z@x>=%2+%St_Hvrqal{PIBa5^j5{x|tK^2Zc zK4ViZk(U!B&h6pT7*_9<@b!))M%q21UH)rnPL-I^%)w5T+Zm^K=a7@lS>q*CS*mIw z`j4ychMhkYhQgMu5YKz_jd@Q{aS>PXPk)1%?9 zbX1vNhr1D7CzI?Y4^;UYIu(OMW|>i&1?)`oz1A|lyA8hIqrZG*mSLLy?O2u~L>DtI zf#kdr0#&>i@oDwzAQN;lJHCnzLJjrug-0%oQUdG@yYN`viEk(XuJipf{@ zSp>UB^I$a-t2W_gOnL`CU!GXE1Tybi&{}WARcVYwY)vSO4#=ucT+^fu?YH;8w3pD2 z_tRRojD&c9k>A#2O^X*q!eCFp&FwQ){tMxcz0ptEqSa>P#$uA;tc5_K38^*`nJr;Z zxo{vD(YG+qFA?O#oUfg zKvr_oUz4B~)937w%&#P35XHAD1?IzT6-eyq{XKGiXh=S3`9Ae6@oxnRvBitV$bt(f z!Csy1PIi5kfCqd6x&6WCU$8fsS*cC@2?Y=PMtJ4d^_e?=?Wb^gg0}Ukss;Gepn-Y& zCS+#*BoGr3AO~tCU<#^&U>gT!BC6~4=GYY`cBF>#(M&^NAMhfB)rQ}-E$}YDg_Pwx z*l_Fmc;xmEpv~D$U}jKXyq>%pi6=Cg7Y4Ee@E22kq$A9Eux*W`9v!((!5^USpm$%U z7%W`wA`mkEp2#%gO)(VI2zJs+R|WVwT#tJ{C2@-y)gGpwxoe7g(vO^wkS_QM^s5Kn zMo5k-u4EjMZXeOwND7|b2Kq0kA%e@O!rHNEhd@Hqb$qYuD6&9rLMx<#|J8#gx~+`W zgmw@||1-`YvL+5{vGcdEJr{)TXC5BO$SUQQ{;v!qxXk&D)19RCv$naM>(ScfC;Q=B z=TE)1jQ(0fxNfXcu3*+88*`E3@Hw3c-m(#Ey)WXb-PyE7)bD7~_Mh z>WjP*ER*{kOVXZn(i1FbKW_0K(SuZY@CGlY^WUlmvLI@aq*SDK&wMWSKsL{N$Gl3j z-(lZQYemZ-RPp*TVNRBZ|JZ?4Cr$j=Q|eT&L^Q4CX zLY?#vJ|hE|Oq6*M^b=B*WJA=gHoP;jL%NCE;4k`#FtdVz4eEE>LL!zB9@hOp$s@WW z@bRoF-HG;UkX2lp2O3kPg)WB42uAq;XGmGEuNohHS!QfuShA`Nzwk?JJ;ul@hAhXC@|j=s{{91sgqSl58U1m>|csQzx}W9+1J#$zB6J z$xG?1m9$cExHl7pugvY%ExFY!PF$y2W zQpgnW%{Pm0da@R%b=;O?)`wnurcy8HnpNnp;(h({MC?LNL7-OPE~Nl602*0<3K>~2 zk#j==)}^@*17d7LI^g+<0Z>ok1<(RC`^#?tRw9_6r3Uj~=Y(x)XJ0>C02Mpt{Zlj@ 
zuSiq#Z(v8TAT2bZAT6NI^&7@{AbLrZ;Li0?M<*4CA7#=W{u+R)a4pL>4P0)E=C!b& zADEV<1>BEb!_xkw6<5}9=j4=hU~B1qfQypb5K=403KoX8j;Cpu+}$wMoCeDmj#6pi z&=P4l05{<7o`4Jv(9TN?xU&I2@}DT%YeddG6=r0QnvgJ@XQE9#lJQ91M95c2T8twc9Jy*ei9;40UqcgkE0Vpv<6;DbDa#c?& zNGX8TW%&WLNw0om^Z|-N6umeVc|da@1?BDmieP__lrL=oIHnPTbgep+4LeZ40T@kk zaYs^H4QN%wc0utI;>RA=fb9bUe|u;mT_HFkJi(?AktSn*d%(6Tq9EtrqjTOQZcmA~ z4NTB&Fvzw>A3@~qu0hElpr0Cr4+ibgcxL&?XJOfYB%5Pny~)ACj>i-S3s^;aK21Mq6ng*7jvVr~q4 zrTZh|V4s|~=#>{me88p{Z@6o4!)Itrd>OKsniNM?@@wCR@oSQ+KSD$~r2oCkAL&Ze zc@+xRqm*_S`nkuJ;so6&E9nVakqXlDo%ELY#QDTMXI|v&oigRqp+otE;-1~CE|>B4 zr{o!^3YoZmEY$_sa6J_s0?<>D^)F53pdwiO+ZcwRF{gj@d=ps)+2MbL!>9Vfl9tu% z1uX}Seie+0+qJ~J58o$Ch@fk~d#Qn{D`XWp9oN0FEy39YD%7vQ#kS%CYBM_Z_^`CE zO=ES{id(1=(Z$KPLyjdjrvfSZrMr*kf) z-)+x-uk(#B6oYg|`9Py5JCZ$H3QMp>p8N#7vmyOZ!!x$(sBLiL z0!^H-kHp5CUua2db)SVaZ6veIq|`;_7d74Nn;B+Ol^knWBnI-ts>Ng8(G5-Nsk`=62m`Oav+6AqLsPzy>Y zxUmf!RFa{8f;5Mg<+yg7y+PBS=Iu+2BT^6P_cs};rSkO*P1~c*;%x(VVF%_*2j^*m zRpndVj1?^<+qmSBAlGOh*VfXp)iVy1pj|NY4b20(HeB-c$g*cRH+cWhm>sK}D!(jz z!KPg2kZe|A`(lh&T5g|jL-WmTzG}7d!)EoC;-jj+;Ws_@ z-r&*yp=w00{HLmc9b+(-dy)wC5GrFS%`w@-s>u~>0UxWt4$f-34UDuw8P;N zj%y6vEdNErwL_7cMOw%+nFjV4$NuQ?#=0J<5FW$U@_UG}nH;oT6G48@Zeo-U%(`%( zI;fNm3)PAR$7(yqL&+M?NIK9}r5R*Z-|H;2ocLU2@{(wuh;W@eyGMxOmKa>e zG%GSENMnd?4ugaS3wZu=+ii+2U@MVj!P(fcEE6~!1{_!k?~{3dwRjy7hU9C%^4-CE z9Z!V`Cd5#%X`e|b#X>u4HEmiLsA=5XxXY}t0VNIk=p47v{>W%aB4!9H6IfSy@kAxA zL7DXxo=2ox@EQt7(*J*Q1|f={X!XJWEN66-5tEK;|8F@%rUNKvn7C{9BdCH%)sjUKT2 z4=>g3A^lza8=am>DfGVu4TZ6$<1IZQ#*x=31%ZAi$+U6jmJpw`MAz`j6|Y~3%SA_H z#-!XI{_pK`f&A+tET@_`7D;LrW;aS)4vwiahlydx%pI^e-yk4Y6o{n9uN=Ugg4TwA^&`k5k z8&K7Ni{i)&mXolinTy-~t7_OI_aj_Hj5%cYnxeR8|5Y{Wzj8-=FzgkE8c+CH3=cr; z%fmFoi8k!_2jwVieqzbdzJ+UpbM1w54&iyz7~GMF-8$@v}s{1cpGmOr#EY) zTJ8yGLk?F@8F~6>kQ+{e((#HO#iA(w-5okRdN5tJ8NGddf+7m})PbR4k8x2nF;d=Jn zi4#Ti)dUxwd5cFfx6wVNEB!q2H_Q6ghZs6pVK#YuRExmMYd2YiFf96bfwOTX@c%{k0Gj_&tC7An2EU7haLn6H6XZg=*GIni{O|P9}W=13L3-e{u`6LahOz(BJzYT%<&`U zWa)%8ole`lqE`9q^JIvGU9_i_K%=1zwHCV9s7CcF*G1+dhem(z 
ze%aH+nfK~x1q>@aHg0bJFDYx${*3#hTx6jmarRzc6V|ryPi%V>9r~g#2G&I8A1cuh zT+UX+)%c-tO|7q!>+61nDwdqY&xd}6^+bD4h`(apuVx=Nb0J@-nbe78jr5Uv$W$kT zW3UeQ+&UM$Z|3mN5%pVh^e_4kfosZg+X(XHnbg+Av_>XeQb}9 zmuZs?vpbLtcEtfC@+LqzR2@AQ1IFp1k7QqZvU~?W-3p!G zu;lXP9L8%xhRkr%LmhL;sbk)2V*mMw>+^GvP(aW7`RTU*SBxF~#K*(_+xp`r{YE#! zo#-9*h2Z5WxB-->rSMV!0?*W~@*lAFy#V|pa>55$*BqHICLzPMbooOkHbS4jSYHhg zxzB_`_>N$^(LEEaK+B6-N%eGa?P+DC>}h`(Hxo6>Mej2kg6qr}%Cyvk{~>>$CdE_# zL#~H7?n?be@bH@fdkzUqbSV)*3I4>~Di6m1%;l}C@QyCj7EoxymOk@~QZ8!=T@LCmH7 z@`nEY)8_QMP*+7MR6~W;&&Ks;;B{PGkRi*yXt+O=#Iq(hSX|x)EKP#}yy%n!&zum6 z9?r$iTGDCVAgiHrTPLfDUKa^E-}uOgu?AJ zA&x)!o&9|7rC4f{SrR7@O%A}PBmZQ%j?^9Hc*rS)`>Sa5RE|G>Hmd8K*6?FrG~2=QtKcBcawRU; zLfGeespPg-b^`pLYSkO;9`Ww zLj=%4CYm0<{V5DZx@dXsCSy>YS7!25g_GQ<+!umsGAmoDK4en=ukEHqel?eN#N#N7-{XdON>_BJmlDx&uQmXpqytL38Zokpy#30-Cm@4MV?F zCilSFE1YO@}hHcK0q4Bc;$ww;_3#hv}N&Jd1X_>5_NhA z3Hp{o#eOTeYG?(^mD`0PaY>4QjG9~jFUI1BLeRIFCa^vT-S?NX+>YuR26yfDd-OHI zg#@vXNKV-w!5wCu-6TToP|LN_nTr^aTZ(28)?^iY+KTdmGv_ zQ%utuCYr{5H@^6R0ggVdHM73t@R52KTZnusU9I{_CsXuq9>ZojA9#?mi)_|}o%{VC zr}pGuPVIK||K!x}suEqwj?Sb@kDSn-D#vM*Mx$%->0+7|KviGYeskP45B445)#LJ^ z1EHRONz-gIeWAbNyx8~q&Bmbi<{YeBDwL->uSIE0W803B%SeRjfIc@iI)T_yqvdLw zlc=-iVzz4H?zcl_)Lx0&&Y%e+~45QIFHyNrSqE>=_8AJ_ooeIo?gY?tLmK>Y2Pi;K@6M| z?N%S!EU&Gq=}KfEpADb>;FOiQ{;o`G8OY3fsju+#D)(Mx1CFjL1&*$YzhL$4;7n)P zSGl@=9P^Z*yF1u*qskJ#smXO-z%t9$q{M2J>hv=qj1o`wah~BPK)AM;s|`6fnm1Y+4mN!d(HD<4j?g`lh8QzjZ-`Q@f0K}3`~6w(SdmI<75v(`mvv!UP&xxaj- z?mtZ0!_V^+35m>I%=4_}Q+xuK=;&64G5O1*t0xHN#nrjcPp3r(VAhbM+ZnQ6?4N_4yq35>D?7r<+ogc8}jp>*~Xj(CE3 zZBe6xetl3;unLlTRM74R2xb`IM)3|_S5o7WhSZGCw%>{D#854)-RN?dsIE0o^f}-z zfIA`Afm8n8HyY6%j_7e?`>j*J*1fR5!9h2dG9dC~O6E%JLki>NFaIqcoFo4~(xD~f z|4xUdU^q(i$xu;8L?fB8uONnci9G>Vb*7-2yxqI7nx&idvZBp<)DwOF@~z!G<9PXg zOyWxlhlp>)(5Ar8wEI2KEo|!_8nfs2f6^GfU(k5iO+{Xk1f3-7-%tepCy#cilL@zW zD(WAP)=Z4yn`sw40UYED3ijBTu!P<-rHjDNeB^c^@!n9G{zwnu8KZ6>qs2$rU;xH{ z1vho2?;w=SP$?7{Ce*YWx=VBk<1c)*NM%xKCy1pRRnt^V>Y~cK+_~aEBYeKGTeYYr 
z)*H_RTX5Tu^^2V=O+Q_Shz}p66U~P`&mT4tDd#gpHKo;(yD;XAA&PYqO{iFdg~3uW zc%lF!q6i+Gh#YVwq#edMI(`HPY&b_gYu}5fsaRTeL5d|D5=s+h;J$mvia$vzJM;Fv z9)O(GtwI$$xx4$X6g*Lix!O6Vy$T#fi8N7}j+r@YXqg6n29( zik_Vhq23?#*}4;l=0GaR4BC(RqXPjlQc5U zs=Zqp;~N^|>*SrPnI)d2Qq{q=d0a@JaYia0%_>K%x>H23{_{XJWbb>0yK;o3GMB)j z4Ajl$j;0EXS3j7?JLRNq-+3WlwwU*F1hQi6(O=9?T2_{Q6H*4pqvgm*nhxT{(&;lz zoiza($X7*Q?o{O3=}ohDkXYu8lz1*Ciz6+{(2+IqVKlKpG%0VY9J@=;YqD=R;5Ub`9cjIwK)-a}o2 zxg*<;t`i8qdDxfIjbjz;?fN(Q%%XL?2q!dy{2qmv@F;0NT@<_$S$u#5AR2&YiUNq9 zLIsluG{$IB{R>W1qoe6j=#T-i03y@LS}|}y?9F5^Nc=umt`5pmk$gP4++&RH(IfZ- zdyHf^x{H-|6P%5p-vT|N76kM5z3R?6bCP>&p>zhFN?0wM+KzXsbVgMP%4Kp%Un}r* z)s*J?JdNhsH&wdrn+o|ygDmiMB5k5>UZFkqo6taMBV=F6a3*e06o+?;qf@!SmvPkT zQV{9qxZcQ3F`(oFEy1Si6dLLXv4@WhrBZw}pr3EN5Vv{YGs-~cK$!v+_sF!Bk56J& z##Od|ZsX$49?kc3ZPGEx#0Ed22j~z{`luQjYf(RXO^VQ3(+Qbh7e$TV!PC zcfOy`xhQ~DNLivrv|MALuFprsp)7LRlcS{ntie9a;6|uvw<@gvPTl&J%9EDKPeqXv zM3gaa`IdwSA8*hDQ*V=3;WuPqd-)lT@l$phBd^hUVu+TjcoS#7Dq5&h8r>w9hjzUu z;)Vumd+`2iPS)kHBLH8w6XE;y+U#^UhepsYQ-GBHDVk!$es&FL4_Ol%cX5GZRy0DP z{4eJd5kiMESHQhWsEz!49ofciadxNp{ivLGP5^iHxMfaqw~r@|W(HuI>(QHV!LWYm zU`}y#8@LEfRW>Lm?C(eN7UFK5S+7I53G$0d?B*p#1SHqb7KX2|vf)}YNnf!DqgF_3 z7mx49^7AjI!(5yrB`Vh9Ql*;kOn1c(lC7Sy@ZYir2RE0iRzDN)j_+EmAU~acKFgln zk9K#FpdU_6-nAyjTsffWO87CfsRftv_0oM~X4WOeA-pe)d65S{C4A^WdTe^Q8Yn1g zT8CFz&n1|TA3VKO@@5Mr)~4{TLYR-Qj#BSZ^E*+R|XQ_+6@k4!3fuarC|-nL>Lwx}yH92(6ahmxZ7?5KX50(>$edqeS{$jRD;?~LbVz!zJx0twJ2FC_@MrL)8{=2o^f^MvWK!ZFTabgYLr zkBjtj_aK&v6gBg|g$kxbw0@u(!27cS766(*M6oo%qK=%5;R$%cDazQVAe|;}^WeYR z`am4y$ik(786cR=#K5}*aJj1<7;K;0uG2T#GY2x$n^rWAp1-O6gzd;-br$P3;mvJ@ zH9xd^i-PP?(Le3%cSBF!EW9qK-)s65Jenhf9x>CskHW(V7;K3Zp(N5^iRvn>+SR;- z$6-U*Q;qUxBOJ2yh(s>iqxEhQ1DiMY_`aT}8CDukWj zrmLGs#Uvb4VHb`BH$o(gjSF>c&eMo5Rm4p;hTAUSU%M*S9|@mw&Dzsbv^b! 
z$Dz~jNB=#!5M=m@#L+gwXl*fpzuE;a*)9vs2djLUlnys9sb@Y5syrzLrVRBPK@vZ< zMf+ts_GLFaFiQPrW>F9M)L)0B@O%6J7QiN28i2 zB^XOGOsBedrmz9z6rU4woHjmx(1p?(d7KnrJs|C@!_1h9UXRfMpP4ub2ykre2^p@x|A2x};Ki9Qpv zp5ea%(#`;F1KuuhAm;J+ig4G_m!7iNy>*Vsphq zL!wX=9B_<>S}~DFvOt}$wOk?7&kzwWqc;*)Ah@iG=MONGBg|3q7uP~tfd(=F>M1G@ z%YV*gS@V@_1O!6Igg?+wva6BZScY| zQtr=mW&9qKs>@6KLkS$A)Re#_v?Z^KXiwRdsTzK%T1!8gN!zwwQ7f|HaB)3Ph+k7= z5oOUHojgn>wmS9;+f*?n_~sbYN?7+zq31`F%}qCsIFaoxLp4+9*dg_myTbz$eJ}BL zBs2=EuBS+9F0B&MqT8Pw>o&$+Tm<+aNA<7mKc=6}Stn1btb1~=XlxN8MInpe6h7XG6CLQ*f$%Y0WpzeuJAcj=hnzl)I(4(lt#Km*d%JfQwi#Zj3F-=BY*^5$f zfik`aH*3lzwGd+;j<+78y{ZZK^1UnbWh$@v9f}IJ;W;L~;vyL|@or|u^{vN!hQo*yEQIdv$_Tuo=myz&EN z+JR9uw1F}wpJSN{jbo))L}6mzT-_78jJa4KKPAh)shhC>Vwk57Y$Z-!#7nC(&29a%mbVpU-D!c$Q^nNXvw1SqfR~*3KyJZvLYw zq;aeKR&^~&&FPsbW740k*5nJ@lh}Pn^Eyy1QNLoYFH^lzC3hsOdk~|lLc>iR{wL0D z#tTR{j+RK-HWRclGTf+hV8r%JSrt-|AA}+Pf5-$4cclh@gWNu;1-T?;A*XrecSL3n z52STGSA1TA%R@YZQro7s0csYO%+1O0YZYm_yX>u(W89Bs;s|oLte{Dm`b0?LOv84J zmo3!>#keyk5xy+awRa^h)ILA|#qT46SGpJxlAL2!SZTXs#C1#orQ^c+6~60i^D`r@ z^*keuHeSB4Z}m&bbL3!|)%Chj^J+ZG8ghEO(KC?D+%y6W4*x;$XFR^Yc{{KVMi%UC zZn7}L)lVT_C&P{m6#tySD5NG$MCKo`aGvAqGdr}mh=S%P(AHbMh(Z`o@ZIdwo0+6+ z9-(SoI%Z#QZM7mxxZQ}5fJ7g4`58>7;#ZWwR+wDhEr2TEe$4*)pWB~$@Yo&GX$Gf`m+2VaWp|n0nNeC} zg|J{0Vu0kn?@r=B($}@}|4Lua9q9~F!y5|yXZ3TEcX2R-Xt$~=KVX#=w=lPOq734> z^Dh0}taDmJ22)P4=W2@hq33lEb+Yz&7nr{;JM%TNADKeIwkGeqrk(9X90pHFG|V|D zJ9b8VWi1(}Zux$xr)-v;#}X5D^aS<*Y(v%u(LcDC;xW`Q@nCV zn%p8K|-y|&J<8LW+)p?lOYV0@h^naB?p?3aN3N6J7S33hT{$e2GudJ>2 z{LA=%4#0E#JCbcg1T-(vqYlf1pM|Yy;sINr<;(+-&_7>U5Vo6!NK_n=MDs>X+NgW3 zcb4{rZNqo}B;T1H2q^n`2*cQCwDRT39(=eVX)dM3emOy5C`Xnu@rWMWG&*%0Ymc43 z3h$#bL}H;$M3`~3oL`;=x4%PHRfBGbFw5p!5X zuHw`67Jmix^XFls#@bgfF7hLieipL^$35OV1HQA@#3%-%8DUi6gkQQ+Yic7}Qq0uM z{N(Rtmtu*BzL=L9!vcsg1|xV^)pBj#5Iiri8?~Lb6V+XWvrBg8L~Z6KUWq3p2RmOA zCm%tX^0-In86GNt)bPg1!nZ)KTPoALU=C$@r1H)GeI6X4P*&$C(L?cIPvvm{*!V;4 zhf%{c?SSNyHa@;5d7B9%-TSKk6WIl?*-!yk_j5V>kGh|Wb+&3{bX@m0C+3*%Nem41 zmH*1XX({?vG%w!K*$lEw!u+lKoi1@BwY=u~J*IPbzb_BzT%!Z7IP9K&1cAu0W&$+< 
z-001!(6;2T@atEZf#WK}jIsWWQ!nm)KEDSI+_et?pPVi-h^cW1IH<;}>TSYtm@jkE zO<9_3d$CyLqE(6ni^g?Jp|VyNVp#Ro!4Lk!8}&oLYR(}F`D%`f>k(h0(B`K6-GeQOrzB^er|0zK0DF zo~<6gUFEh1m#AUs!n3w0rkOX5*tgBA!Hy^|J@?ba4u3XItHBC(s+!~resimY1NZ=5 z0Cy4blgLL_dS*OucQ!i_Fb3Y>ScEocyV(++Pyj!DBtXZ>GGiAaMO^e55z+cWLQakl z$Oc#&lzR*T#K!3qT$h*sBj7S39&q`ExfS%1k`-wq#n|o2D>O9+VVNWS{R2dK>jV5f zq}q@XI&`0j2di&thv~^5Y|r}5-jWF7WWSnPdCQ9yx^^UbQc&=Vg%wntVDc-sMSz~@ zADy%NkOy#OfMfu(6cL29sD=nEUPTsKv9Z}&2zoKZ!=mh$Ip*AkaLlI93u+=rY4U7h zCdI>DlVvPkJoBmTd&hN)PkwJBYxGA7VE8p0~8xl4mY?pJbR}TTL1K~N#4P0Yr`sA2c^cYXcmsQ z4m?mt;a60bz+ik!3gT{~EA10ws z$vJGrlQyCp)p@XPUhVDkjm_ILTqiB-mMe@N_rG~bI741RbSmap)c^#Lt_^~vRWMc| zr5o%S@JUm|= zeZ7Uyb%`_Gm-tHL0xkH`M&$22Uuzgke^{^Am#l;5_}73!a{ka|%xmBRkdXkw04V73 z0F*;9R|qoc5QlIyI)Smb-mfbK<$UkRIV7Gvir2vWI2BypGUU`w22qaKJlHQDV9dHv z2NMU)eut$eOna2To@7h3D-2euE9{n}H4t@h!{E$;0WI$Pk6R?9?e;)F7I;ueP6p^$ z#d`o*IogIR6iJ=so8VZL@=XU*r&qW-(|^I{C!N*yx0oIF%9N;Q(@I8>t?M0LBT+Lr zbJ5tG?HSw=IMcAerj8#Frn^6zSM4o`5^obc-}biBAm6_@l7=M7iE}#FC0A$o-qwsF zAq|j;+mMf%Mop*DQ;U=LaqtQ#%a2Pd8C7F-h6HU=hUN){`PYDUyXQe`!lax)%$HlGnm6CeB9ul(LfJE=Us9hUnkj>5Db;fLREFCwAX zKA%jj@ZGdZ!Nue!RCPs-xWtlkgHp}ddD84Oih*)Pepr%D;Vx)Q!3+d2CfBLDJgenU zq8_EJW}X?D-}FT~sK;NoPfc-_@%D7r8b0&D+?*zCKdcUF;h@i}Y%7dW+}!85xGy>wc`|o*FM!iTUU&2V`!tcb zxqqgKAPxy%85B0q(os~@xK5xb`ce|aGsSPxwcOpoa7jcy47{7HS~Fk?<& zoh}}9k4~86=tFrrZUm6rSQ52GziZmM&Eq=~2yBS|9wxr)nym>!Pl#qaC!99oS zYbSXdbMhI3Vg7jG#z~T^HN*)&Y15AVl<8cyd%*Rnch*j?18xHSSD#N|%-`{f9yvqJ z^_z`NM?_%^nGP7+H@PCOr6MT^`Drz;%XsEjR&5n==Qog7FM4Fo9stlg+Nl28j#YGY zQQ4wCF3Vr4nW0;F$7O016j>EGpb-#PR#vl_od?OQ#EPCXE`+%rID)P$%?r zi5@3QJB3F!#G$8@_e}py!&RixBV#WU_4d>;-nkSF;?~)APP(#y zkt_!8p9QbHR7)8+oKR*YoLQq8EV@B^5^4~b+5`bBcR4XM;C4vqu=@_We`9f5rDTzE zveZAz91f(%YdKlg)EIl=f$cu+%Ewhh{TJ%mQdI7&J`H*Nw2l+@L?~;a#{Zfp(Q9n{ zAIQ?a8i*`~^ONn_JnXKnkuKdlL!gfSV1Amjzp5$aOO^ibSrWtY-wSzI5fxvXoc!~Y z$IU*U)S3?^4kcSPGGq#($H=&T5sZ~4XCYtHf|C9VY+88y12!2N$7#&dWVC2>$)c0`GEaW+RN#1p(%%p)(S#AC`!Bp1D2AyVpP(Ct zO1FPX7K4GE9$#F!Eg+?J^0#wAP*NAj+Ys=p%0S{qVxE7lUoGPo+@ZUS$jr%=mr>t 
zH)sYr%1ELQb7O5Q)G=w2UngjLDBUGVD`ye+-YMZRv6tC~6ZS3f`Me4K9SlpUQJpLXcjVTU;p5HId-71^X0~ z9ZY~bniq-2G}{$?@g`!#?8}O9Nm-_XQfAeD)%Rl;4t_pISpWJqJ6K!F<5*kAa#~d9 zuwV|Tzy?^o`UR$*)6Rch^NiLGX9GX%`}P`b+1HrD-RGNQG*-W&9DeQU5`JyptI2a; z;U*#&)ve{n37 z;@?o@gIxqvH?g9_LJ zZ_SQ6{yHbv4|-})^orTUq)K)g$Uhutg)Le(AUf*5zX92O6P|eL6$n-%wO^{VHIOK? zu+Nlah25o>tIGr5w5b`WKYtA4zYb_MKW;Rv^`ZN`pAZuDUk8@BF*iyZRv7A^-;3&X zmG6g$`*eiam-_N-+gfXk=DpCgjmF5Ca&5An09-j_BccKER8kO$eezFBre_&mrdnWL zSU-}41V4$uMh@t|jfjCPXf;sQBPxqp@PoG}AJ;9+PES4SkmYYB_xQ&26P~g5mDwJz z54fry1tUZo&>FPT^}F8N!LP7cTvOTGwBktFnG;`m(U22@7#tb-fEWO}6bn27T~pi3 zl<mmfe=>B<+xj=~?aLQi3);|B5LQ?l9>vI6qOBP`wa+ZNUqi z@}+xfzW~9c@iJ^A1`T4Jkg+8eExvs4+yQZq38cw#|6ioNb95zdyDhq6J007$ZQJbF zHahAU9otUFwr$&H$L_H2>fiT`efQpHpL5R`cl}jWskhcUQpp%|&SyUJ0Y*;sQu5Ub#(bv2!O>b=H0ryL8y$Wbz zf_w2oRwn)YUS@3|{A{@dS7YB)Y*D$EPY##N90cVPT`JEl6Gu#3)-(A&=JQRSC|aeu z8>d+sWxE4X|Hk#HCCMVlFhH(gApVLl#gAu2f_1Kna-bpQUXF{ z`p&K}UVp{-W#~#6r&~c7r2VzJt*Jj$Kl@m(Ao{>nA>x=pop3R&fGHy9TB5s7PG-#D zPQFlZI3&jB4yg0(L1934vD0LkGX{DR)hyk)gC_UoW`+$hC_u z=*d!lpW&zH8f!OzWmvkMNI)nn+1w)L5s`l>7IvAug=XD^{|EMPAX11Tx$s)H1EqLl zqr$Vr&JNK*80ZWI-nraKiG})hWpnO)`E9N*;jR*J6#tPS5=&dJBN1Wt2w9iS6edT( zC1raD_K?X*CrVH;dpo#D`<;3&3)ACBa9$=AF=c4YY47=$TmlLvr*ec6R$Npo6MG+1 zPyM7uu9e?*Nj)bV>l4*5_&=UA(H12TQO~hh39B4nGzd8sSPuqndb?>Ht~gR-ZA{_>YpN ztAdS8S53FX_DJ8dtGE6t(UXM=Z%W-cz)T4Cgb2>(6S%1X<_3yjPQ6&=J*V;EszD+W zVcPUJ-3IQSIbeET)kI>Rkyq9#o#nRi%G{Tm{UNvowyDCeik~(a$L~Ft3U8$gUjIdo ze&8{`hG3z+(;xe<#$0y%cOK zH!QxSj?~}wmBY~1@7-b(1pmE#g{2r9J!@Q=>a04I9bW3cdb&)f|513~-(7KN-?k5R z|5121qUa2KNrwn=$ln5~fHU?U3j!CqCuV;1Kqy=ITMZ`59ovDi9>Zi+N3-IJhOon7 zj4~C){EnetkAzU|nG?D`U=)d#m;;S;Q?VN(0|?#1HStmK=iiwQ0I_r?ROx~}VczuK z9*8yO<;LW)AM2am!@nF|Ivm__DG$zm`BNz1-$IU-YyZLh=;gPNX54x4Bk^g-t{4bI zgaY}eJ*#YzD{68|ryE*gsyU_$VAQ#ErKMYZ_8EUR#7^lE!=w2)QWq+J?9tvSX+64WFEsg1yAv7{8zaTlLLi*!$FgH#@}e{ z;_)dif8Yvp4IoAyp5T17yQ0j=Y(X?H7v*rFL>iu&EBZ>jQpAcm0oMlZj7I#v+qfhX z2J`6L?0KiDLB2#0)pou3aw^+(r?SvZX-l^iW9PERsghEOSg`E1KjC{l9F95?qZurI 
z6F>P%Grd{cFC;bFfR&%;l!^1VwOR≀}2mZj7S%_*RLBSuHEj6+L-w#|&JDXXFPr zu^DB*X%RFWz9exz8GbqJVX?w~WhwapN*fdxi4S_T?1BP*_E+lC zTvA?iVFK=jd>Vy(9EF>TMR3JbNZDx)N%c43cLCuEb$Xnq#F92&HrRBM80T-hPpQ&s zS_O3aZ?YZb)_$~c#$;xfa2ue>!-r5O~)h3K>Ws>=4+a5m)q z(aJL7j8jN`zM%I@N*0ZT`SqJnJt5C|RZ9c+rHa5Q4b;ZGC@!k}Y#VWOxnzt3<3%Cq ztN>>CstHdex^^R1VXz}%(2TLK_~1-`V5}wo^kZdPwbiV$*W6>;bPkNcHt+7xy1i*! z*S z*ossqnOH(Y3t{|_&X|cWuCGMEFOHD2Y-GXJIc}xi;G(%`OB){0eRwlI2y}yAzI|Fk ztu(2Pxo0FC;9Oj5!4LxZBeti3sIl^l<%3w9{xyM|uf}>^LN{z-rbAg>+8)XNF zL8n!_p($Hy*cUj}qt^nl!7y!V77ssy)M%*K8z^qrC*TI>ep|s|)4#sWqG4NhY5i>? zy#|*_XU4xXHGiEowH{O>8cibzkz=-#JV?xi!$KR*ffJqU4+=urDi*4e!4g1fw~Ykk zND%-DfVYLfqGDO$qzR~nWm1}j5CRK;31cKwaBoJoyhVtF&C{r_U%3e_FO{wmo>=0zE~*-c&`__{}PRMqchA5QIk))Io;yt^oVcAOAfM{ePICU@^0)V3eLNGjg)N1C(;?vMo`ULKN$Cl-ow(&!?xUC^!mrlML zX^pF~+DEff`ZCxmDRl{V0a$#}vnhA~I-Y=kP2|3TeoUD*bHFAGC zx0%|SE1be!F&nrUDyWq$u%~5xf%6tu9fGZMi&iiBsd(s~HAt4LgyII1g#6J5|m&jnW~K6#pt>6G@rq7LQvz z=`51j=`=pc4~5jgK#O=ki_%D@%`zE{O2=wZJvCDTjr~_8m$^L2`P-r~B#a#O&T994 zzlOEgit(L=g0q~gkNna9c>1TVd9sfRkMyxXnJtG77N&y6(lO^yf@U76@^kX}$V4B2 zH|8t<9C@$tik(xztf$MV*~?#1*mxo6Yl_(r~*sDMHQ(bow#3xxB3ZA z%sV_f(z5f*b7N3GcRGtK%%JV!c0(p*E`5UYk-!=8Cjqi{l%IQzTAmWUn31 z*gk}5=kfUm3GUAhL-%QhctoK367{?P1_zDKDU#woB`Lawz4U~wY;58^p}>?M4EdR% zp8n^+`-=l7TMPWxsg%jCR-3CUoNy*wpuoz=@#;8P3A2Z=Vxs1mRqsTrZ7n^E^EXeh z#THL4l;GHX_HY09@{#oHE?G`iVCYfsccMOA?D8(3JN9q28|U5s zC0#p)r)vOnbW;p0_A1_TIH!eC_rk9P)$ zrGqoBttfVWtRx(ZC{PoIB-Ecm8<`h=1hi@f=XzDgatjX|7)YA5SaHaSDralL-T9w! 
zzD@ZR)}$!dBtGaQGV!^FcijlQ4Bp#9dZj`u{3Cz+{X8x2@{msVjWo`9|F?qzJo}0m zT^k|cjYr(WV=06UXrjpCYLh#286SkkHh_q(0R_9HEJ0*R+FHd6i6KS6c)0t*k`&Z0 zWI%!&p?z9P`={vYrz2Yd=cpx?(^(p`;^+ZS;D2Om)A@-1<2}lnYM%PfehM66pr1l0 z!QTHr`6-|QINIOGyuW-fKKZV7p3N&S=Sb@ooHfRyV>U^OQw{>LcBVt18s}ogoP%(Y zh5?R~n?bSUPO`5SZr;kE7v1)YPjz1 zE|oC|K`n+vmRv(&xdUP~sd9W7xWLD}4Ly?Ni7F}<(tP7osYar+yB{+}voME&+^`&O z!u5mv%|x2N7r6IbOpMmht>Q(9ciDWakFhw2HUG}y4WApFMtm>lVpdR&_3j`-hcU62TFih#EuYB@?GZv7= zf!l3SA$&^9Auy1cL;i+0hg@Lo^BIaTf10^3K7A`!L_3KJ?Wb|6QdJuhbW%<{A%vtU zy^n=bAN&O-3SqX5vhPFv``FS!pp?StT|VmBtyC+D)_*J=Tu6#FStB$qlEX0I8{ipm z6#=)K(^yt1x+Cu>51C_wAeRawhrJgx2%1hd^+b-xqOb4{MSkV=U5Cm}cK@d%683K} z+l}goV!OIV1b~bin9SK$8fNu@(q1qe7@$Pl8X#8=8c>#m1yH4C2T(+ppa3Y6KuZ_^ zC3rFbIY1f<1VD8?iNUbGOcaCxL@y)3e0P2(fg3;vqBTwnNHH%3p-WB#)5;)(4IpP9 zE9_N4>QzA`4WAG=tI%4V&!q^+YKjPhyt3OeQlB<{PL zsREMS0W@97Xs};B0}`F#c8jpBd!>&K0}_wJK)Iyl@2IlW8o42|z`0-?RrCOq#q#P; z{R;s+0ci;`Z((V@_(*YRM3h1bj-HixdDqts!jz^gyyKn{~Q)aUS#9MN`5 zu;;Zm!Tr1fG63u#)-;v%c7(16rB-`oBA2*Xp5P}HhrgeD;QF1iz&hh@y1xCClHj_p z*Q|4%k^M-e`I8IXTtM$Yumtr=D9tTEE$9BZh#o9C5*O&0a{~itvIIwU%mLk<%-{6u z)n;o0aJAq#PVWS9$|kyOt$?m!02E-57K$btW*Y<{@=Gw`K?!3=Z3+6i3AtD#4GECD zx+TM`&MP33Vtd1)ICnqK_x21DrFqO>R(jAm6OTBaSj{-UG_%d&>t{ce@12o~n;^iz ziZ-va#f?Yj*OA@Ov%0=JGXw%ED8T)U@cTV_raxTFaoOU}xZNvNLV-fQP~K%5XvFxQn0I_?kgq;pYA$ zRxxJ%r$q@nA3yLziRDpi_Sjw`J&Bm!vBt=*_&!5_9DO$aLDLZVhy0zId4fy=`-Q3M zg8?KXh6vO5PUt@J5kxGcjVF0fXOm7M|>e`xZK}+CXpV|(jRM>ozwu8OfQGuNEif{74uKVR zpKmkwYwi<8*Vxe~*XC$#co|q{$D8fDu$QuD7w1+gCRnj;1w@GY@Mb8?5pj3J{A?EF z9r0BtIxwEoqg*m5II9H1B-6`tLvONYSbbuOz|ZinI!KnE{NsF;9AXtyqJ6GqcrwJ7 zSq!FSqjM%*S!^b>!+}unZ4O7ynQcsm!eExLS4$mRc7h9*II{<`y7vvWevJ8E)P7~} zaj{cb1{x%%926v{co2D4TLu9nClmT+QvVClWJ9-6PVr0FG#oV1v`fbUAMu}Al0AkY zuVvr?dgCx~5J-(g8Kbhy>r%xGq$y_Lg-BbGiwEPYr`YVC{)9LM$4JPJdezY{2?$Hj zQR8BpZovuxz@SEusA~Ku32VZD()Rca&^Z=79WF6I!3KgD@!sF(gD6XJCE0MCs@DH4QUS?<-oFtT`Y5-D49;`HM9z?=d0ZD$EV?x z;IEes3VjU+1)gD#@~JHzbsh@c<)Z>mGjJ#f*GFZ*UczL6Z{C>bkCJj~oIms(lJIB# 
z5QmL%E11y~dDy#Xn`q#>Xw(;?KgfrKz$0B`iaOB--RldH+d8XCtp-V?tlZ=?3;Hm^m!a?vc=k zZKEa5>IQBbnv1i*L9Kcne%sGgA3&bq+MjLB`748;=2uBQ8h(5qsq2{@+BO1l0 zh>8i>-Wyee2uzghrVLaafR=Os3rwu!1~r8;{W)A0lNp!&A`YeR%o$ulB-94{BNuXm z_t}gYf-)^l@=-J`4K<28Pa=jso=-uCJ`Oaj4#7d5RG%dPcexa1!aUN7`(Z&vx<7ry z2}aPDG3vnDF3vKd?pn_A`L<^H>7N@tCYXzKXqOmt*!kfR4!(ZnSxcgTeEW%~C9G`4 z8Pq?X6o}c!wi}Wz(}69bQH~LSt(NT0IxeTltQG$O>1kXtZ!ZyRMGI{Rt6p3M!~PAA zX6-c~f3W5_HJo17g;2B?865r#Iy6>BerQ0xvokc7h%P=8dJQ}>RgryB$#;=r%paJ; z#6s^g?jwJC5e}k)JJA2AhT#sMJGp|k#8aASqSz;ERb-2_I3jpO-~7TwVjzZUgth!K zYpnp%m?4Umzr>KYgeN0Z5um{eF>Af9hOpHEsHQN}+_NqQhYHZVmxS_Ux5K}o*;VhY z2+%x{2GrQY*E|7gj+H>aLUd+O2WT#opms(&NIJG>sDZY&E4RMBra1DEoRaJfQrl;@ z1?AVcJ@mit--YULh~gA_K(&>sTelw! zvaO0ce&2L%r}gG8A`^$dE5Kx7`EOz)1Qr~Vdk(na#jxQ8cke2mOh6sLgcZWjc3rK? z;Rgg>W|IC$U1XwGK)tm{Kz%u=Qgbj?o-H-I(Aule8raYqPB%9N;%Bs@pm&rli{1d{ zvvMv*&e<%oXJaY;kIdZ+M+xv-T*MEN^!y8D+c!bPXkutZ%f_M-;2x#p% zMipZ^mcj4(t!61Q?6MWdL~d}yY2)O?k|vhW#`Q{A#Z0S?U2&JN5B}4TL%&Ih-UTiO z4yr{5)4;u~*LF>@>_hL=zxR!W2gCM`+K^}$urf9ZmHy7+UWm4PYw-+)6)+|*ncWYy zwpCgxiocl}04oq(OmaBol*B_~9+1z~8651iGV=GiG`4?{r>upDc7n)|TSr0)UlkBe z98>;nV4g3%y-LM;*-KJ$iF#TyQ0a_CK1BwPAM5V6NjJh9&LJK)*)-bw5cpkDE>*kE z|3Vf$mIO)F3`+nCDw)-%6J;6=>$~LK`)39`pQbbF7u~aO!A;|tF(E9kp4?*J6{`z~ zOH+5(%v{?S73#}##@BLH?*^c8uqV50T#uhGLmdxxer!_kTW?FDjp_~DJ`@}3bbV)! zQHPy5ZA^~s_%(tr`S_sq(pc!XX;)FEE;Y=hB5;vkLBGCIDzAW=!M9s!)N zfZa8vSWUCTXDim_YU8A z;Bc;u%cqMT$)A^3%5T~~SFRpo6k`+=I%d`-DMzxeSQo#JKtcO_e`$D1e9Q1}%Scj^ z)7im)m(qf62oLEm2J_8i9oUf&j19SY_XEZ*=(~swakV1)^vkFIwbQv~FT0*>ICj;V zrM8G%%%^?MIy+MRriWtXA?ub}tBSARAjejbD7H`TbaOl-IfgZjVa7!P_GL8GNq39* zTZ|rI>`Aqb#Tv``vK4U%i;nvO`!X%s4-S4#f*n(+Jzncocd|TOFb! 
zF4-oTkNX{96r?8llA89RT&|bmhM$+QGC84knv_gX&k9b6*OJ-tN0g8s4f3(T^N%5( zZrGd3@&+A3WmqY!Kc!S(O?TR7Rh$9e_q$J#jm+~!bGcfA!*nqJ2FZ277PqX~HzEu< zex;lJ^VsRN&F7F+QFh`n7rbYasRcK4pxG(sHu}uGXX+flkJGXu*^(WEJ6P#6^1rs8UyTb@)2 z#VeY!ISMd!!CKvZ?4w)nZa>1mdakGvqX~JrLhWblkC=FOtg6~O#S5=HFvexo}690eeX&v5CnI))^3t6(Tmk#X6Wl`_NO zDEUlM;w29i58*^6<8f4v{bqq3&HF_vZX}sauX6cIn+hAdS6lnX4`neJ*&JLtlV}2c za|PjKO;RHk2YUr-i{^)E%C5~we%QK~smfd)yQy_ke%Rui4V3oSW;N|!{IKWyQ>%-> zW||<&5+(e9RU3Kt5#8K|PwIfa`Rz*eM5#zd&kWx`#Lf&KLRjb5zBv*yZJZHl??Pq0 zqXdTwyW6*C{Z9U-T{jcPSgRI#v`$rh15Jsd5bEr2)HyA`LyfyOh}o(H6(|{6Ub#kE*vtUFo2V?NFPRJP zb`mgbPE!RR{l)7X6bFnA(vprLV0ga7I&j$bmr}3sx0oRk3IJJ20VG)oO~9Z`1V}`( z3K;5wB@PIxC^ahWIw~kC7#XA#aMj>^NxhV9WB}fN>${)h$sO*v1X%9i^OFX+5aR+y zkN4vmG<8{!B-v}>x|bGTfh9Vi)g8>SD>)IzsQORu3uYdJkbXeBOoi>UO?-3Iq>!^u6-TPc75%M97S~SYx6$0n20MkKGfmb>R+}zW^kdAQrXk7B0TFhS zq={dR2NQWJ%a4#FzA^f64R5{zdoR|3zZxlw=8`iA8>Ux!qi(_mvH?@sddsbf9kuhC z6-Sre!a&mgVv`y8K}SI$VUUd*c=pxMiLpM8PVEiHH3SfGZvwf2M|Ol%?NG&DmMqb> zj}#+E6CJY32;9UJ2QCO?zFDlk>gRGHyj#Z#$rsidQuf?iCW8BGpF` zTw>yzVFx$`u4#ip-AmhgnLzjtNH2Wx<9a`mhEH}1R17U6*wJS(Js|&UwKWd$I>Fe= z%bCA!sXX;AveQ%hOM0DsmWjL2=1E76&)Bb;oZcRaJm&cYDFznK3$HedopNkG{<7XO zuTS-1wLhshKL?dbofhJde){34G9tVLo#@o$AVE^pT}CFHpWW`e!(4i=aK$a@-NBPK zW$ZL@Oi6d|CPk-ANyoC>2ci9RRfum1vQpDlgum`oO`9HNaq7WYw0VBe{V-!@dJIh| zb3#+?adNB8d5a8yK%hU^YPQOyq?8Pc;B1sr0=8j;z|rtWfHeK!+5P=#vkTkF7;}xs zDtPFQ|5b5Lo0_82yt+%JU)x|7h*K=ivyINzviJODxdJZggu;#$xnfT> zUG7`^r2@CCtg3ZV%?Gk%A8-7U^NXlVvbcy;S%M-0wL0uLD#8T$#F@wtfArRBV%i{% zxq3w0QpOMjYDH4Qb;RHzQ{A`(IV8yr9Pmw=6Sc@Fr`{)kovF(JPpz%fzM7HJBA9AX zlkcEKe21qM*Qb#OoZftdA?ZL}5b<=-@dz#|x^mR74vM%aAA7}d0E`+ z>Ruef_~SBs{?f)=A?u`UVMM;s?@-(~EOsJ@o$V7ehrJW{D*FA78fYO6qTGhRvfmiY z6Gv(xCqFrU_H3k4lqNjchrR{OV4Pow#qiED^JW~|XS)4}d=8kvIIkp2-Lh``@uql0 zn|33>(unBWY}{{FL@CGZeV1f|uVp)w^yaY|ge|F<<3w-5E0%KqO-dz05kXqU2L0RD z4JE5^e#U~()qDA&9MPP4P>ANjx|0)e@LLLoi{=CLM@Z#g$i;}tzns2PJvNeut7txP zba8XeJD>71!*r3KBA9ECHQ<nTd6ci5KlRlqBpmyQnEqg1o_m<7%$ ztW8u$ev|kwtHh-IH>=#s82uyAzSX^0ao^)I{qo00Ag*N(DIw^2#~{Gs1Y-pM_%<-S 
z<+K-m97rKUwMwu$p({W*C)L-d4Et1;02g`HjhVMW?c;sGk#oIYT!8FyTTxdUpj%37 zC$F}9fIPko1y8QP(IfUl+dC6h3{m({N%;F{KDKo@9t~yKC~@Obyy)#XKv?vbgf$ME z8XG7!yP6b8pU?(Ayc(AWGQ8TGVIHbRDF$5Q$q3aVagb&AqtZ31(8a(&4*|g^d%9;e z`Jay!(}JdkgsFGD?7ee)n4O)Sn#lc6VO{j z9Z&#+X;FmVLq&U(%&ESi58m3+QQ^{Cnhun7D_P?=S(^50fbd`(MkQfI!8;jZ+NTz) zOwE^bOKk-*mpDt0&SBvMrqQ8GP=j;mF`9OOe&jX?Gk-!u_aMF9w4Ng~QwZniaByLV zX6E~-8#=O(0PTxOQmLntaP-JD0v6M*CL5phIr)LHS`pNZt@${(YU~!kDJwStr^xI_ zxUW+vBchXz3qLWKnH_Xi2@N{Zbo~}!Y=uqazEdPV4Sf>`x+#(jp&l#15|E(cV;kGj>Z+a1g>pWPag)pQb74$1hbf~M%8au6HK=#G$?Tma^D zLr);YbWa2eo-bPxGm81mpfOKjb)5zL&tkJHd(h$;hr%JJZTc;56gFu@-UehdN z#JEt{wH~nUhDaKB!acmkEFi#botu-seDeVOWEpPur7WFiP%!gqBUo;h^ir^3C?FCL z;M`)IVJFFin3%`FIB+o(_8%oh>oBK~`q6GzH4Oq zRYu{wBZv@&6c5fg-%(q5hhh4rWscE=n6GCC7$&}gr_7944VLS;U>3+U$EN?2JtPld z1rFEEi*{3stwgX956_-b2hrSB$|v4ghxDjXmZj*2Ofs$4`R=^<9`5{{&>ek!lATRODVg+(4;p|r$Bl(SG$d_Lfc^5ar;-Ou_!S{WpDWc=W0bcl_YRMSyctn=!WdUag#!7C21CGvmhF1vUv^K7g?NSuoQ+m$GnqzPL_c>;$j3*t=hq)o zWr8@mDCSQxP6zCp60aKP@7PJTG2lCyVb4ffD!}OXPX6@}KgT?eSXiv_8NsjUw_!Rr z`j&PZCYC5odd2WgK+cNXWvONNghuuwUn}hMG7SFFf)wZ6btdj52Ld>C76GNs(sk;n zQ+Wl8nFRf5_mu3MhChEIKrVupQIDe4hBgaaZ5mm-@EqM@%B}<9YMcH_t#72vTtEQ= ze0Lw5L1A|v9n;ZCCUd-+ppqv#z#G=HXg zaJeyjME-jAv-{w|mwxlqh#1;J{Z*dh-o#ov_y`QNF-c$TRoj(GRc8WzrC4j6jg#5D zJ}KFDL791@C9mKwoiUX6iGU0KN&B5EF&j9LM>taCl6nRFbGLTu<-1#K_IPv~%iv@(_| z=>7VM{X57(@wIdny;)8oroS3B6XqjY8W!=W;0iT|plVn@j;c^MC5el+V7&Gp95fiX zN+yWE<(i$9f66tTE>ZKmMGr)@ywtQg+ZPV0X_X%8AZMfGGfXNKFZwnMS%+{6YnE$S z4^#uMesAj1lxER1NiU!is!ew$RjvJ~dbO?+!(&PWO1L`iO7e;ol;=fsv_&8d3s#En zb5ML7jZzX!`U*a2xAi<{KWx(PIG7JIQT-N0dL4(7a|RuCN^0#?nVSdKn08{*9O!uw zsjv)N8s`&NVZ|SDp#`YqxuN>qf@F?w)%({{o?|8m)UUon8y8;CN!ki*8X*-?K2BSrv5G-Z1W50EFXEtG z08>xcn7koy)Yh?}WLXO^UYjuG$W)`FnFb?}1%mOQDM+s$Z?$&5d}x;0O}2t)qo=Zl zL6QyMm)H898ov97@$Uvox_nQe#{=zo;V)vJ_!-)0&JK%EV@Jjs9EzzdYWcfFWPmVz z?Lz2wKGY#5BFLX~Gxlro!o(-lRbdSMqyq~+4rnY!axe<0`E)pu-$1^BCB-n~Mj=s* zg^BQ*r7$xt$76`fOX(3QTE8B$6T>JbH3tUvelG*ONOC(+t2QV7N2^XImb9~!GO7QH 
zRcv$=dLGiLSqpz?Yw$x5wEz03jMf96WSDG}Hwo_9c29%afDkL|$7pIGy6W&ii=^4L-qS$i?*kVS$ss8ZQ?S>Q~av zNe{W(C=sohB5s|xCFQUDGD)9BRY8rHHSC|`KewLVbx&Bv{q@rC2Y-9IF@vF^uHMC> zW9DJBJK{4UgLlQo&i7mNv|$qg`3Q6 z%H3RfpI9mi7Fl||+Li2w9tqT<(zBgyfn$BQdl=Ayp1E$(!P{Y%RQnNh-b=aZnhvzV z$*)++_gM}+_H*cO!*b$8pCQphg>6Ko1YzT&8WjClmcqVFfTu20L6Y6(X>~<$fnXU) zi-rdFg|DrW8RYk{9y&)kl5HMJGM5P8sAT35A9VxcgW(YLH{7qOaEUYPGe3pzNakcG zV>ey#DGk*&TZRYSagMQE_FDeN2OEy-+y=EhTaUu4RxxupAa>j250lMn&xqEKM)NjL z|HUvHDP}3 zkHoPx=$eoyDd$AmP=8&TwHWCwqI@EQ;HMmX{B88k?_thaqt3qAN`1J6RXoUMdd{QM z7dTTo=-}_4@GbX~VwCzX2ETluLq%U9un|aqVY42?T4_JLK_MYYwdU5EoUcN#u+Qa& zvqUj{_g1cOhP!?~D!@{FafzldYbp(?{aVc9YwG%=aljHG#ou@)F_%M|<*wW89e>Vr zKD-02M{*2Yj1fm8^8*~lfUa8P&y%S8YXb^`TYKuL&oroP@i0Mz78_4lMws(IGB%00Fk<)N%0oA_U)k}$YMq@E#(1BM=)^451)Sq?g}wT>vskvj6xG+r=I+Vj z$L6Jzm4zJ2{)lL(IEs!eISjjRu*WSS(U`UgeUUTsh<%4sy~EL`NR084txoc)PZ__Pu)q+|nI_-Vp=0&B)uuK5*5G1b zro5G1w%mp26-J@spC2k)K;ZJnN!$KD+ZjDnH^280Q_o-aAEkI@N4%h%6K5b<>6_&| zJ?K;g+nF_;WIWC+F?i4w2|kvqjUTBRzMOhedX`}>-gHLsWWFG>$rY>-n$0eX!qy<) zBU{CyZLdvC`mfxP>1)PgJM~g^mhCF6qo<%tyQWflFn>(9JYuXhaIes@vOJ1r+AIjQ z+QoC#4wj{TDI3+?->of~k`U`$YeQ04H77Y+AU!y^inc7n1CyWL}0SS39~%@J?(GHpPsyCFp-MZh!-XmQGB zmR5Ky2CS<1rwDq^J!lu7x6dEH)*_F?>UrzSbW`rD49M1SQZhKAS%JLdK*Mi(>MtJs zSnYLNAqN5{7WU37?++XJO}BsU1@+L!bmRV#akxrI2Gmqn$cHQY!8XWQ94iV6_*>Oc z?9~?$X?$h4(#Q3$%7lw~UERdHY7F!=28nA2V)Q0kRn+TyDb?+5z%}$M8(UgB!=pcwz-+`z57f3=)`Ike5!e|ZXP)YIu zzJO8qLnG>6&R?C0zR1D=5tKZFYMrfV@poIgV&Lmwj*BiXl`Vz=;CKJI5uR{>6thSb zKM8w{n3llE)Usr#<)$Fw`0G#N5DZ+U;+;UrMDkb#D;uNSK`VQKDUw*jKAZs4{_f)i zeRp)k3How(JfU$kvC)Q{jxV#RaxLa13!ejsS$)#;qSYi7$0?9e;M zx}CpE1@lK=k$w?9*E6I2JC!?vKp=_1xkw0tfesc~k0HG7# zN6G#fF`CVH&O}h;)UTq2uU@bj`HqoT`Sl9BToNAiLG~s~GwcK2TI1DacCI<$!VE zS0(|!k_uDl3b^mhrMyj0C-5XVDqwe!GsqD`ihA}A=p;m1N4kriP`kB=PL@f0uza11G#ddAF{0teX#lfzhn1{PjH;!w743F{0s+bbVJWawlv-QISi zXNXnp28EV61cOt}B&ZxC_WuIr`b5C}A1oQ2|C1$S;cfB%hdzgc0o3QlCyFW3H;RQG zgdQ-;FC$OA>~s3eK15Re3pd2paoNF{M}9*Erln|ubBm`Z(v9aQa39}HG1bH3{A1jZ 
z*Z5Dyjl@6y7&n?tnl6XAG%g>b6?9)(EBq??q*-H<6?uP+Y&B=-m<_VVBq{PPSJuDy ze(I`(!PoCUH=Ppn4GT6~SufFt0+v0%e{|f9 zA&q+i-sbXH_l_-rGvE}!OZRV2JB0^dpC1nYn|-5edhCC&Z-D+|-}oD*2^T^O`l`VX zv~Tpp{BP|Wi%APW`-V|!OMjP$$*br@-yUNAk)cG`ASQJSyNZ1Wndo@wVp>8AB7^>* z-uFpnbJ9R68&3|7G0oF!__j?FJQ&UwJh0^M`CBJ4XY`WhamT3RSRKOzL(6HrwmOCNl7qqMSm6#gJy-pe;sTi z=UXiPJ2&G}ovL%nz0i&-^xpXUXU)mf{jt}9o;<$;2FZU???nDT)Vtun>K!KaluB_n zo9b(AZfSevnbY_nGWO2zXa^rK{NZm8qZ*w|(Nji-8_v4XIXyz5IyCSPl&E}$6g+3+ ze^gFHnegTrf!!a<&uPq>QXQQ;f}Wm2{U5>jx+9G#Wf7lZN!r0Mi%~ksx=uZeEA^h+ zxi)On72@>4k2FhXq=hC$_!6=|-_gG1ymyc7$3F`n#70mgN^I)?=9e)i_MWRV%i+s5ZA{0Vv=FzUCkb_ZIiY8&*;(D@ANMZwQ! z$Fpq~Uj==Xk>;O41oe)PvfMuv9DLj>-J zkIR==n!v5V_PTD$PWTY4KoG$0Izm4j0wti|8mWI`s|6P}-S1Fl)nCdN*Keol#+N#Z z4Lj4QO#w)jTXp)bw|9yPye_)ubk~LA@zH|sR!jGcnfb}mMc4ZX2BbAcOAACJnn_z{ z?tVp=$vh+=~Lx<+4ikim1-Fy@JGYaN217qS(ff%I$J_>^e zFVB$^+{DJF;#0FL8uWxaw%8C&*wT3iBx?@cYvi1h7&uLf zOr7V8CV%aWw66Qv%wOmg!J2mW-k_^E3+Y5nWKWNB;z)-BL?$k)Pgd_uYC1;GsoZ68 zc(kN|n~3&O>KwpL#2w3>6QC0uGm!n=QgLU({2sF$UJ*w@5l8#*`a&1d)-n(*4l*up z*MjbhjW(!b@H{J;kP==Idx5*6f3|U3Bxa#5kmjm;kkfJUSHilh2OryDDypz(N~NHv zDixEcXsWV^=%P4r;f*>(L`0>%sH&xYk{t+0h^Vc*B>Fy?7U@PwLizJPSthYdI46Bi zdO=3xTrY)@gD(R$*j@Sp`5zjGKoKfl@(!xkT~d-+#SNpnCv_g2OG-5)pjXXw3R?H{ zo*PtLa7S!0pm#a0S4NdD;OaT(iWz4*CS4UjO->U<<0`>; zV14_fu=i}0kx447ZjFg0DX$C^DyS+0`=ofO*byHY2)EHP1AmeJ*-R{^q#^>YZYw@T zCB;^=CX^HdS!Ghl#uRi{7%gc+85k3)v1Z_Rr}R|`C8X|>JKtKf^=*03Q(ECpq>Mm3 z;1|Gjevs?fdxoJZZsEzwfu4dcim951HzosAz*Hn$Pm>D3mwIDO$w5p(#)&Jz#-yl6 zc!d0tbgf1el#7nb*C1~NT~txnL?vvgxnY%4&?)QjW8q8xmT?;MoG&0^3quTY0pVWNf`rLLd(z5A&*;HXB^M zqVU@B+I95IJA%)tE9__*{Xmn!l4rhYNja>9sd74SIehslKYpOS^!|z%D7yjaA312# zbLkhXDH##E(K^c)5lUMara9U&B0Nk379v!MS7C{5aw2y@0RfJ@*}gd!>!9)iOb&=~ z*uBVIIPiBe-61YF{h(K`Lq+dU;l#hRs6)B&``(9e5zqnR4t6lW^;54=@kewQ&PJ?aKI09iov=;LSZt5P|TCmPpV^ z4Mb937(Ssh1(%RZ@P-RDV4o&%{|GP!WT=zCNN3Ok_AOPk6FwjgfFzUj?pO8iXXZ=- zCd^?#(yCy4Z%~Z_4oVgQ6Ci0bi2zyy7+Qv6fG%SD2M14GKzf;+5T-WZ5HP8M^HKj5 zS4MCL$OnMuwIV`)K;|TL5ZnRs0lgeyy>Nple~qO;!1LH4G)w{DP|)3o(E1m`K7zZz 
zIq1MSlphp13mgP@fphvf!uq3U0l!B9{og?;d2L~9@4y%l4L{T%#mwY~7fncJ01{_V zL5Vdm1oz1X14d;kKoY@-^GQHNyKzOgAVl*-U?M&sdzJ!-6|#d~Mt=XL@#`y#Pn-o# zY(xdXYoZFgCgs3uqUho0TQo*0p)3tSK5W;F(f|XvbuL*Bi0LFxBICXl zh6{WGqRd({NWMOVl`Yhw@eQk2_grY872l=}r@R_dV+H>?4%e?;z0W~rSW)u{ zjkvTqOqTf&8JQ2cE7Yok*eepmUM{Ikm**fy%GD|kJP{&Mue_Cpn1-V5Z7+*UErUxn zV=|XIfR;fd%{))mRwlXHHdy*CtH}z!s0r`~5$OE9hHUpMJ8`{OVhYdGhiln^*Z1Tp zea*=FLltuOoFxr``?q+WxYLt{8%v4T_H}iYD%pqO#2d?ztZy1UGyXEC2}0@t`RqnN zXXO_~RN^1Y%lxX>=&=8JkKOaq+Nbkt2GcTG>wteTCikK+FbW9~RPaDLzq{M?uCpGgnj;xyYi5|?}bFO-0{ z%Nh?CpYPI1i+9)eKGOrBMG)?8Y;3DmZ}?VT8QoVW`F z+K7nmSbyWhaq1G8xBhVA^#8Y`! z(6gXwWkaEV}Sn*FShqth=!7Y)prZ|jFTZP;+Eg&A=&U>?7F0WtABj_GfUw`NBD zS1oeYp-0y=`lW08FBuNZT)iJ-*UyU9r~430)d}?4Z6qb^8{Bu00|)T^-T`v%1@p51 z(+)d4ps=avZ=krh+iy@@8e;H2g5t5@z#Vo^lcNWuj~^2p1U`){HGDON zvrzruaP!AR3%yz35w6_0l@i>iE@{{2(PM1O@VR;F_rW|00oTUjec$!Rj>55G6n8AY z_#~J}eb#FB7Z_B2DuWUrCUAZa!~~vQCK8@HA&!D@UkHtqP#Pcml`5HxL@DJzk-R5X zEi#aUsgwD2Ib`nC)?Bfwda9bmbC+FN*|pg^JlHt~!^X|CIj7JA@k`6AsNP<<*92_{`XCPiM!Sp9Y?^+7O&83hEfs4rgzQ5 zke%t6GuAHomT`-(;a@FHg8x~`$p2ghDw!~HRX+ddJuJ+AEM#Q}#)4FF#(jLY$q_0a zGo@nTt@=~+&V(4=NJBvc*7sze3c5%KkH#eVFI$7`hlwf@pxeb;wZLjh=NP6oemsWc zR()Ugq@hkruRe3*BzICamd+{WA>&pe#?>TR?XxqEZ_nQ75jvH)Z7$cBB+uCNsmQsk zGSG8Um5qnT#cN>;fBJEb&e+HM^k{IEDyyzDo&> z?lop^$)7$?uss?47xB>gABczZ!~aG+=&nD#1Tc+@ALm-1=u8_6Eo_Wrj9IBSsz?mw zWVg1Z;50@^<&Ng7nfqHyFm?u2yplVWc^ZE|WmAc;(si<0A9ecCESvM`Abx}1aY|u~ zMVGoucoqmTbE+*M|ACm>e?!c6$ValV0dw%Mn(6Jw8QAmMJd`xrrFE|fsT_nfo&lZn&eO(|W)oWJu|c@sKlgZ}?C$CZ1tr;mZ&5&(O2;tGM0*Mt}3qCP0pP zwAw#5G6X_B1DJh8Nhdg^pwHI)gpN}6CG}p%(1lNfgPvh3?HhG8`*dXZYrf7T-U3kF zWU|!ugb0v*l#ldJ_ThAO$ok4LMlgR4`$zS(`cUTP?Lh?>MK$Ur(U_I4xsBa>rT2?g z&Z+s8#${%`(5>-wU`~5%2F+6P$b>@flLBRt^1sL;<;%KI`TU9a&y2u~D;pWV@^)4N zRBL}LTBf|y2G!OorsUvkD50P zOM!&6&Ogl^YfKSSz$n9GoMyS-LLyELz%+JzBg+pno`~AcuDqBvGJt@^@s?dX*91}wPlhUIb zC2gCTg+yZ3mJF^z$EPkom!@CM26f z;Hj4E^O6~-sU@hWN&M_YmiXp)PD!=XL2v3eV8vu=C_!4fYD+el>3AL7+Kzp+`l1MC z*DcvYKxw5pd}fWqz!LM&%lKPcg*lGO0)MqksoM%77E7VP$>M}e);eYray?2JbSya$ 
z5p74PA&7aLMJ_H&lL$>9v*9=YmmP6Pu?e&zK)?J6%t|V0{|U^d&HojcHKW=D24*9r zW&Q+al`MgQ*>ZH$RN=z;%to0oK1o$rYa&wWP|=d7Si!>iC(uY~BBc|bTrc2bj2}jA zZcPyD{PX5Bei95^CX`yXWq4Mpqy~N6QY=+gL@pq7Bh{j+XRdd`ze(-aZcB72DflSJ zCpbJGb-k3(%$kmyl(t04-O<`Nh@7h@iZHjD9W@=+SporTBT>{8Eo_u5r@LZ1LUoF; z*G?3Nx7Yq1)N#X726?3BobHtt>)%8dK`!r&-h*kCz2XBcZ=f$5nd_Z z#6ks<>=G-m)R;~M^|=PC5FzVgLW769B7^oF!GQ;f5PAs3azh3YJO?^Ua$)W}oSsfa zK!MM`+`h;L-f+jritG9PV*>3mTDtb2GTp* zJFA1op(n=%CIKcPQf{BBxIM^spDtN^e->vN@f4b|Z40La{7h^W@ucTHY2V2#*j@*W zYKOgG#tII<vnyzH}A3nuZ`&l>?+V^hJ3~i+{ zOM*O36W|B@Xm$l`1G+(cz#j7`K(4x|u6!YPc*0S8IRbvjf?VaYrv};k@B}#nr+L|e z+%Ju6)!D<0kOa}AHe>}|CAcS5)4#IX5(?gG2?rcjg3fjVGpL@x461X$p))C9N&|Fi z1|;i95tJ3mB996%wNTnzz?BmX#-48rSN{ZC?@3O=)%$zyQ6=aVaPAIpZm)B|5peE| z2IveN6(ApsAIj$=R_~FeN`ed~fE#d!0Trv!GlC)L0c4;<%4)!%p^y;?KL|eZ!USB! z23`R@nLlF>W#F@NeSFvf{UD)rFrhS;2atsPGSGs$YJiAzWH4lg5=cTsK_8$C^k@~} zfkFrXjy?o<=mH{u)7bn1MnK}rY;E>cKj?|yfEV}xeWx##07?d+@1zg(ojwRY0ez>N zZvi*-0XM*AkpsXu3xuHO8oI6+V)+Nfr{-9>+$1nY4Kz?j4Gh2`*&tvNDD*Oxp)v?4 zx0bJ_m>jR{Lx5$7E?%ny|AK0hY|uIWugToNcnA-JzbA8{0mc8B)Gp`v|01ycbgb6}Y>a!e6DC%UL zgqRMY*{BOG8c2r&Bf~LfE&mC_l75PbrMFckfnihznt>`jt( z%Lq;x7)7>be5jqvY(vFGr3&f6`6B1F*qJyY;fye{ke(+{K4m)G0L!u83sY3(x8bO& z5dRM!iG%GgAIa;#e5AtrfAf(7H~u*RJnqc#n*oohA!XY5MZy;UcZiB2aU3E4HK`ol zC8IdxXVcmM^gc>g;Qvzo(WGXlG9mAx&CKQDVKMkpY_9Nceo_q&&`-*4Uvjx!I%ehR zQZMJTCC++SX<&P{Xj{KX23Fv%+qId$U$k~?x;^}>i?q3Qa?P^nce8Gp@-OmEtKuK> zuH0s>?Hb-6@0AO)|MsN@bHsfL1>^e;_Z19>9yv6c+=8pNii|3dpGwoE|5D8K`uSkT z!PJ&3V~*%rY+OG!rHft#E{x&-6abIE*ZDgDPMZH)+Ib2RWSc3yV?@O!m3s#A`N-K) zQ(k9(){czK5|~fn?^A}OBfCVPyo^RfF~j7(i*%OpC{Jgk7=2u?Qq<1A5MNh>KNCZzJ40kC7Qwd_0~54hI&bHzQWDOrGoYvN!bLAR*uy|{m|%VKo7+P z#=C5xtRAfmeWq5qAR#^?$rGba@I-OX`y`ZXO7a74*Z@h6Jhr{#$~(p9{=ag-VZf8` zFT$&Df$DC@<>CW8v8`E^2DLf=FXRFBXkDOgHCM(t#7(V06^f^}56`)r)gMaxQmDeq($>Y+?%mc-;*(=;A*{TUaRt7t zh?Z^aB!<5SiA#@DYTfY*f!$tw10f@r7k0p0?*l<&tgAjr+n?n-8+0O$WY94@iB zkY4WW@`n0zjX|ly(=885p=?U0>*M8~4^}L(M#czNRcZJJ?S?XzVw?k`_Mi`(3F2Fl 
z&z*e7q{Pr?S|WOqsXX9PuGCLVKtj60sHQDr8~HMw3kUs+OAKer))xf_&ot`KqjF3_ z{Rc_6qUNel-XBeiszw_XQ)XyHzM#9O4epZyPqKwn&N_-RM=_Oi$rYY2z+n?)h zeFnrwK|PK>Jv&80_@zIdW5VthRyCY3i%%x;DD`V=B-S*CJn=4LiD;W>OeDSTFekRF zerW5Vs_u)F??|c?ZgQgDHnQh>(~RVzt+&N*m7JXEY1X$+?6CY!vs1L9@pV>hZQ$2% z9B+Y)dMHtgEe<5{0hum-M{aug0+)9rxB1cT=?WK(9{xa0S1B`+MCt{@>1FvVs_S$| z3BKi%2!@wTnD+?WGU5mI;X)b*ga@!A#w#b@viA%9p68sM!mB_L@Ul{Yt9tC7XPf4| z9s#`M#vdWG!<(O7x%W&cAI^r))JDf*(44PjDTc@5w_tO^2u8}TJp&b1N6EW;5Av9` zOuf{rWyxL}Q!4QMP`-bIy|eHtY@=4A=6NLBqUF>w=*n#UMh6H21y>V=hnC(95LFZd zNed{rp#cRxmRbV*Pyq-XbZWk$sKO^g@*2hf%@}98_pyvqvcgHMjG@c^F#=}npZ?oL zIesJ$Pik)6uaXQfK62uEWLK_d$U83bIRM_VXP050h6BSCm7SgkJ5!9*?M@9S%3AO@VSK zhTeqlAKC}{awlxzdUceSAh+}zvJ|_WrAKI?5xw`0M}>s3gPw?zZY*6 zu)jxu?I=l*Zd=H1vg0XY{)0i#$265C$Zh(fNWd?3ML-XTUp5)oFUZG>lgA76E4&>J zH4xwk;YtVPWa~SJCxqD^@fgshmCcNI66AD$g3Tp)_dNgg2CA4vYb|1_RI7C7%h?t% zrwYNicE8P&o-XHX6(f1`weaCGcW;0}SD=hI5+})=<+8|B4-eS=TJYjy?*lUy|3QcK z>v*w6^#q8SxtN2@Lzq`~+=}%(^>kU47X`mD;?#NDEB@EQg@=9a*G7fL>z6yM z0&pZvp9$&Cw->RtBMkuhkcaS=RQ^5H511U`1l%HolwXW5b~>=)tvBq??{eKAyyC@n z)j{d=X2^o0j~13R9IiWcW}=U(Hj}%TI*xXu3rBQ$4W1G(ko&h?f(|cYT}Bw2 zUr7XH7O->KaLmjY5aXi-VholqQIt;5`XdX)$Z5mr6K5~3vS7+W!llG>;E?``oSqrN zCWA*j4Av56Bq#@hcu*~(9XdX42-|4z(1wDeMc`nPsD zklgERDojL=Spx>45mr6Ssc6m+Y!9YFg<5;XXK1TE|j=l3N@61!M)O z*N;&F<~~>}>FWy8-78Y-SiPb*LvT$Cz3TUH-lhTaUy4@KD26`ec0TPfPl)iyO(BK> ziWpavRHR#0w_rY#mBZnoWjiuSPuxv|J2IfHgkoTcbmbi~tp!XiqYrTump{ke33lnR zvwCO2!^jy~r_x8-=^pzJJW?W!S7E*9U08`cJR|q2`gnP;mDJ5U`i*#H9)sWnW;z)} zJC1co_(jiS2t`OVNf%A6X;B@*kmyl%P(c_qmBv3gAQ>|D8V2+MfBeyqN31glJ*ie$VLx#7Z)sYHZt5i8tkQ>slj8lAi)66hQ8Kdr6<&*(&GHAHvwGf z7vh8QlWQ`mrWg(ZXv9CBBvoUJm1VUw83lYMDufUL!^Tasv{<2A4krx`;M%j>K>*%e z%8hRNT{F!dPDa`&~JBBz>uLh!XShJ@0Fl-@MMEJ2=BYjK@{(Agwra5 zIRTtlI)K4U8JGy?v|{=Q6@g4W9Q4h*Yj)k52tWlrp&DliWI-jZ4wx#41|3Ri|422g zGFDzcmFwZzBKcQ)Z49Q7~-}N7*=9h6Y(}my(_o1kJG)t zFl_h`X77E13`}eI?-#>I%-e%?6V{!z>O{sy7Mvrc49ADQUT6l(A^Fc9mPw2iSSwxIy zXFopR(MW4~@PI3(hgDC8pT~+_C8X!o;q7=Ly@dFI;zxFy6@5ob)`Z>A7oR2#=hdoj 
zy7$!62SfT(2aYv|H|=}6`j=&{cds3A?T{)$(3nbUf$iC-do`_*{YT(c6ANen|ej15Avvf zT~~}d?2n3S^{Aq}SPN=801V3M*T}9HmNV)~C9&V3|A=&VS1G~ZE3xS(yZ}6g6ZlV# zp;SmfE3ohq@-poFAaK?H-AFLXB0O<#s3V^8O2ib*&@l|Tx3IGMMb!PnjU+Fpynd-E@joB=6n!uTI7SBhdQdJUeyp9C`lCP0_0j?pOSw3}#L~}* zjiLWkt<~|TS}T9?CRxZDV&}Jjul}oAiz)4tLq=!l%`k>!Hmyn5O47Qq7`p^|+u@+) z9fC$|I_|Gxt$SzZ2<&7+dt(&4uKrD>?rug-oJ2(#Y)?7Tjg0>WaW=^+S-w?fX(hq< zdkDHjWI0*<&1{9AYmM|=kPcb<6H@AYkVuw#*#$var?6n=f_uv}I^3b(DMiKWA^yxBHpZ%ovu}%Y!+vY>)jHSYK}7XtenZ{@8wVZ! zaEp!mmT~1D{;DNH&hHS@-LwMRN7gvC4$z_nb^gmr0{0K!7K{%T;+`GbYD(2{AY({? z1GbKXM;c#D@hDwe$b9d_stbg{GdV|OV#M#|%qqSmnJ6oI95K41t4 zZU(deGr0My-<9|GDVF!t=<8-3bL{8?o(+QU%Rp}Pw$rh;G%vj7@6B7Xa;k6td-E30 zuQw23DjMk(-Txu71P>r4S?p*1qqyaGfQoyq+x(zU&_Apo@ZTjjzVWaw%0O)32^X9E*Q0)5As)T_tWJtV>%UZ z{KZ)_S@$mce<Sg#vz2m^tHC&ZFMKjx`_Zuj(QFYl#c@1tQulY{VBfp>yeTuiWHMxm=}RG^V{njy zNm%0>WHtwo3$2c*A~iMIr6C;`U}W#+=N91buGB~skO_5OmN;Tis(VFk$u>Xw zB7J0CwLGBLS_@%o(?LM!)mWZrYIj`KTXl1iS-irBiDJ{L78#uF*aNBv@} zFi`2H#o$D)e4OjclUO~bMs*D3pH}9jCQxo9moavLPTcP+9kx8&a~F-jDU^roB{3-# z2q5_hVC^|em!ZAt7(`O0()_IGJ33?V^?1tiYYd7Ca>+Gqad#AAth0x)75!vlBoSHb z&mJ;WO6Bubt;=NNm6aK;SaGsTOQzEKtCT2Se(sCr{kEtCJJMbk4BtGvn+e`3;bwY< z)9YU&PYFYX_uysx9K(C@?jHHhbdyCQURy$DjnQq4J(e+?L}DD2W#5FW)e6kaE$ms2 zd_xx%9$d40v%QVhrTx=Gy~fUt9~sE%oO2p)wGE%6;1D;dG#211*#} z$yUJX?XTU5F9_xj-P%6nu@G3b_?)!y663lraOC{nuUM&{wo50#OJlA;{;J23mld{` zN+GH~Cgs-yl)(8vFPj_R%p8q{HNWSeZMOtFoGJQDXqcA0t>_62y!iB;_52dvtZ4Ih z+?Q(L==u14hPLJuhv9Z?`D(~u@cmC?Y$3<4Xwk!5Gfi7U1zHDq3@m7cS(EmeY)uR4 zrg2N!StwnPc2K`GK1qIMlZi+Vsh?@X3-XFEW;U3Pq-di=Yr+OWx+T04c5ojy=rA&4 z*rZYrlgSz=w#W?}wnz#-a8OFb7#6@}A`n9qWPHe@8o*}KQ%$6C-Faj~(nr`R9UOuF zDB+v9?^+jy_EC0Q1C<^|ge+GE5Adx0Be-m^3TH5zI@qJBP)Ju5XK*MA(oxG4Ih_V~ z@Iejo8o1uQ7D*NQRevtzVK;Z^x#lnV04}CF7Lk#!JB@AT;!#$kzNgSy#J5(~pwd^N zl$c&cDx;<4);R9gyqHwv^~p7fImGvlpkMExwTD+KV>f$)4!S7SdmdOUZbIY9suW97)-Z&&mM&^z9b9S6OD8aEp9biwy!XoQAE#DgAt0&jQBf#0Jc8r9B}Z?K?wT21ltx=fbVt-}}+1zNmg0-z8HAMq1nxQ_XiPZq8e~6Jd^@KjM4U zmUvS#ICG;X$j5(|;p`eu1&`;8V<@yN_m^lutf&S%R`*fW0X 
z`qKQ1n{_H%+SQJd4sKV8NeF^KAoT<9+K4^OT-HIY1NTEp+O~qW7ehr_)u&bJ&#vzl z@{9?kG6mfFl--7`GaX7t`g6X_7bnB(yhJhNUxXK(>0;MK(v8O#)<@E?^E7%MV&g+) z8k5s>k-59Dn)-ea#%EiSyai+~UlzX}sT}#u+<5b`Vb(W#^I|bDs`@7F zy>Lepv#NaY{EjhRyoHeD#UWv}iLLdK!oZ8q#5K9k&Oc5p6|CLltt@sgslSt6Eno}> zfQA79NIseQeH4KWW`@l-EjGerIpX9&732aK!Q|pSyB|X!Stb(;pQR3W=**`k@;VL@ zY%W(W%KIC#sf~@NO+}=rZd2*{T_Cigq70W)x{pi9R`N|UBwV_^4!d(Y0V80AR_|kmoOEKH zJ5#&d7t_F}GxKt_jzWQ$z@Xy_o*woA$**85m6}ku#Q* zp>jTvtQ;DC;>&LMiLwyf3_od|hzk{F-?ui+6057LTf?{uyTUCg%n;YBHkpQo#E3ocaRXWcg;w1J!JXNsy@bJbE!;)mXdtJYW^1iKd+6 zA{(RtU+@Lemle=P0|U}l1-o;PYSeo{wwV2vqvrV1zu@&``8kwM#^tb>MTqTE1W`9goTQr{$O^>rsA0!YOoY2r zbg2^^9WR5`q1gtsGA3tVxiUW|2~Dzy6+Ky7vCB~Wy2qSjvZ|_uxQOgEo^;?TKx}BR z;Ni^Bcc9HFxCpH+hG`UR%5?RPj=)1^&Hh=~!wfYQ^(>xw5r*jbUHt$<_RFN?8r5mX zsf?H&6_2{AtG~W#?#ChOc|THdaejC4l-MUCX&Kb2Y5{OPTb!jm*iF%ZZIR(Vz78Vy(nqo|iS z)9&ja8pUt;_efaUVMwp7#(wd>njM6R45!SA;%S+@z3lsB;1^EvDz)oUvsXZHsU(Tl z4x25B_M5!=5#lsmf6@8T}_r` z40qO8mgw-tp0>qS-Ch+}6;cO3x8nwMvTz;^_F&<#o4GMltZa8118~Q6B|v^(J{)&5qFhCP zHA7cFQF&1wBB52wnfyul%KFO87}tDg)dirLb&9E#mputhpZnHxspDm3d;c@<-ga3_ zSE`3Tp6usnxhyH`Swpw=$aC$9_r^l4YEfC2;&;)PR(m>sSE8a{eQE(=*C*dsgG>cn z^?#AD92OxyQ$dcU$QAk*?_gc@9LXny_llPX?Z;?fQ9OPV>v2EN+4=71ZiCe09kiX;DJ*sx-9WelARC>Vn$gVs01D z-f?kRki5z~8I8x1WtwhW<{QkXu^wiswRCM*@YYOgeLa)+jjy4)>#`RY{`WG1;+3z; zed7$_KH(>jYJo(J<~!T6rSsp29px0i-D$%3@*F?_btoa{jo<-ru^Jjzku@i zmG83?u6_sz3cGm{<7(@d<;<5z*-MF^PIF47^G`_lNKi=;ap5@l$%1B+W@eLad6~Rm zLpn9iaoFjjc}DC+5zGtj4RYN=0?dPU#Fk&UL>M6nTJ*CyIpo)DFk%rcN<2 z_F%W`y@D?=SCX_(6ALRJ!)vSRLA@LknmCWJLIbSwCVVBl}yZ+Bib1dl-M>c&3Y3IjzERcdzA% z5l?ygOdDyB9SaVu4lv@nqgF`H&UgtY)eTup6=*~)Z;EJ>L+gr_K*}^j*@?h1`RVI* zfWCMri*KcLxG)B>)rP24x6UWpRa7LhNatc!4E-V(7yJ&_o-1r)P|A@4)z?hA ze8G~4!)xYpR>=b8L;=%-kxke$?r2*#jJ7OWR-75%S{u4Y-u4Ezw+1U5moo-1Ij{86 zhn6(STk_==bsIb#ZyoE^SpT!H)oe_%W<_ekZt|cVom1i*Jyu|56ZC#`+8$3L2Dq#K z57`E;*+ZwFYmYwv(7PD3OOv}?{c0nwVX7z7)bv(c<7~VGsE><@Xx{{!O;+li= zrkmJ674dZrPDn>KMJ00{)md~*-M-yN@ z8AJjOx3gUK?15j`$#XqAKFj~=lA?I)zLYoFb#vuTMA;U}?~Lw~hN>IM8DlN~zWA)J 
z@w^3D#RGM0!FnkioQkNE4m{NuasjL#UV(ZD1}U2Yx5pl|^K$Jd3!|T% zltd`*GG=3uHPS8d%v;+94&S%r?A6?kxSvC&^yrhgDrf7@7h`-!Xk!u6Ob|c$KOLkS zOuGM=ymh{N=t1JM$4;S6L2RhNBJi%1j@BWuwG?GXVq%?!aMjztI#Z4yz<4IzNCLB< zY%P(J0x-#Bm|mNAC%YL6O`C)`dGC=t=CYQ#dRVpX>O~`h5}#NNBvwEpIjRW z;ku&xX;mm^f>69Rc)`+iyc=-?pg3lNODrx=u6D9%h_6BdkT>dW_B^?@-&mcVn<~=BDVN>v-H}pw$M- zsFs{`JxC<{1te)((!ZOj?|PtO`qjCPGRT4JI1CczL$wC%U(fftDQNi7@R~YRKYb=N zwtH6n5cRdP<=Ot>Sj5@Eu@+(7DPPrITdrD&Nzv-01(aQZLh2vPmD~;o7%3l)G$X%x_u1(UTEUuGfPUWk9 zO9%YjSW}J+r1Nn*BsRfCwF12&BM$dnQBh47T~X1dBCAl*O;gk&qvp#@22tS6#kIUS zqfH;PZ0Zd`sEAjs;3UcBeB}u1dci@-kr@L6U`UA|(IjC`oLK?|ISE$;F|EMxMtyAw z*_@36aEc?~CkYK9GgSoG?Vwmji?W2&w=S1ldugbWn^8pYx)42%exzNpK~v9XnC8tM z%-;^`_`fqH=x*@xa%>(=H|*gJ?94ZC!+n0Qd=s8UG#=0y8NXbzaJtaM`KG?jqb}ao z8a;x$iJ>mQ;}RUQ`8CwxC}flSQlHIlHHXOvYa;~=E9|pZTGgUa3I6S(vRX=|Psb%( zux5^44?XP#zAk1MW=bn5c_}Dp?}4GGbsX)qt--rheNy6hrcE=r!?ng(Wk__-4f?w8 z6pLW3_txzOw6mpsHwdr}#`=xLkiJ)UJ?KmC6H+~_Lb!TyrABb zIo&ed4=3_4Kd`b2_0-ikRrqfskUONpj$-Ao?`EQ|&P?!^j>wr@?Z^F_X}J9G`8DIlUoFI|)i2FDZU*!d|GjR&i$2~L{W z=HB^3s_Iyt3BsI5k_ZDXK1V>;Y~Cx0A`FLzV8$RB`T)KMhds~5 zAUQxlX%b+bf}V*upNR~tpY;KhL?Iy~s@)g`13-*k!ka2Uj1DgB@Ua;%FOkw+46%yL zS<}Myj&W>LSVU)dS>V)bjx#U$;5D;eoS0!54e6AFL1;chfMH_S_Xd=F-pV@0X@5aJ z*EAf_RzOGKj>kg7Zo-7_%P8^g%}^79AFhTy9r-w2I!x=kZWyged53O;^0UzL(im9V z)m3R2127Kp>ctN1#ReIMo@#osz^xY#U<0j2v5u|;2@Xj^hDZGh1&@j`XqZr5;CH@j zW0j!k*`YJXCIHcaFScpbM3RTlE!!(hT3L6mRgxaLsWBqAjkOdFJ!V<0-FRGG~0HIU1{FP zqk98Of4Eh?)ackSB5E}~*T$qhCK^Imup6dJ;fvzt2W>0Plul|YN6M$s!_V3YuZ2YF zstv;p&Hi)=Y)KC&1etwugpnFDheszIG#nq8OJm5_Mu~5?DRRfp9qf>(bm=j&RgJk+ z(l{CeBgVMj1clAMfA2(WaFKdd=znz>IIkRBZUE3&!GN#G!!m|H()V1C*BQ^8RpUGr zMb?G4j7+D&H+GQUEhc$jd$yIJ-C`@FE_Qm#DF{k`{I+lz$6m8&TM?7Vvvw+piw{!C z>WT~EXzcIGC*jZS`YMb$D%tcDZ2>1G1K;w~TYn(JAo@s=u3JZ^EgRmWZStaV<;P`n z@wWSV!n3-A$38x$8*v*Ry6J>}DYx_Kk5Gh959}et>VDM-pQF;3I`CPMD}$f=E$jVu zRadwrOE3yurAk6MoCyr=AcilWF$Cr~5tkUTd)PHv(L}~dk2#9!rSsR)sR?Z>_DpIe zPRdTw109Qho}NHx2BsOixEe__bjH-axo*#^Slb7kSZ=%J4yE3m-DoPu$Bc^#H+?+o 
zaI5~2)jh&@AV%@iUT`KHVFgBqnM9KR(MBy+ZKJDaDi+PXvG591q=lY^>&_p=WK zG~3r7S*_R8PH=yOU(8Q4rF&bwaEpI={ddiKf1lvG9ba`D5;$ly^VMBv;`iCrgVMR_ zHEkg%fn<(~q$YFjYdCN8Z;sR@1n@lgfpzO>)MdM3*vOxp$zhUhv%Pb_B(LuKsWM>U zADCyp&t5yIx{nOR#7Pmdn6a>!@yx)-1@6@>aY@9;m?$u1Ba3F9bgYnX7ZBj=^%H?g z<-Jjsu?iZWqOz6}CrBcn7cjZ&`Qg{7q&H4{qIm_LTb<@@=R5<4lgg1x<;b^?XG9bn zhaTd>E2~A}*>I;<3JY;5+u*zEwJLN-t6#%F+!#+?z!CXX5GZ;hIRr0Q&cDN*DoaVt z{y)mzIw;O`-TLmv-QC^YgS!*lAwYmYaDqdCMnZ7cAi>?;Ew~drXmEE4{xzA|d(WIb z=hXN90Yz0q6;waeweD;E*197D6~w1~6gcmur&`t22~Z+p>v!=?wlg0=?%I-Rj^q9Q zWE&L~IdzNmio+AmJI0@$SPPD(u8Ta13*KCNVFs}pte3Vb3!e$bk%=&&OQ&6b!~KdV zC#f}TGuth!L_nBbVs{YBVG%sWM>sVoaM&P7;g}nas&9FmsQUVLh@_VDt}wl^kzo&U zXcG^A6=u7^&+i)3Q0NSuW!%w=sJUFNVlpg1_H3dQ57Ilr8GN1gM*AJ(m2lt&`=dY7 zRIq-3xMR`Vcz=_FYaF>J#Ooz~N9d;5UB>JYaz(H?RU(esrE-J|uiRRwn#X`Mhn8Ti zLDT&~de*pFNb0$#y&{NrSv$0k6JLQVHAT*8(p^JQ{2@!Vs+WAgU7|(v5l7PIY9_oz zVAaY=LJuQfLn4F>C1DUgUy8|05ijP8N)4ajwJFY3YQPwRT81MHv>P`pt8>35L0|Az zM}eTN`s7irx;U-+ryWn3#Am4jAF=Zs0U|!8Be9RW<$;;kDJ{svnTtL64R=*@i@tDW zHXlwr;B2>?buS@)0v^SnScQ!1CHnhF&iHh6SIP`uA=f{rkW1>4vDXZ;{h*hrCyf-B zGbAW9bigz5FL%2q>~Md4rR6A_UE!}HY?*gx46oiN_!!2;X*n7`=Q6ERnxb?Hpy!92-S~4r?o=VjVaAOv+$--+rPYug z9BOKci}HEZ)j#^)jJ@gAw;!-v0AGOp5hNi|N7rx%&|-_2U3LwBmqV$;9TLaRtgVl} z`N_j3S?h~EgfT%U7wG*NBpY)X!>9LQy8~PEdyyY0u%y)r^bsAkV5&7F$DRxsAsV3_ z4pJO@b@6+2+}5o9-WCR89I9PbUFTdCwbr9qUt!4gvo`3Zs(;Vg^}Zl#@i z>HX^}Ja=3~c;Mx$9+}@Z0@4D&d~SyGIAj1V&|?Y6YYSE2Lpw@cb3V&=daQ$J_{7W5RM-?x0e55A}tVofJv!(+zq z?~=U{tnAvoU*^K&uCWJ=7W%sj*ht^?btPA+6xOrnDAvH5!r*asCbRo?CIkDz<84YJ z@FUlrE!WW+3XEUkd~ZeUU-!;4l3>ahdTM?cEsf zsGI1ax{WRq1U17Z;dR0h=(^rs5;02~*U{Z$@2wpSkZW^ERiPlW$izose0=Y;4W*gV zj5s#1TC=h&%pCQdbX^RTLmm|pJ^tPy#OJ}DPs&IZ&Qj;5lm70sbI9uc#e#yqlDr(=!dA0Zgmf zHN%1k0Sy%Aw5UtqlR+XSC;IK$z_j+c2JoC7un8d#7yv{&je7z=G#s8c8j1~w?Pggs z6bQy!iUirh$}Wo@{$#x{>H}YHPfEcm-eyICS^SIPSvByxf2t^kJ=TOstop8JxL5G{ zP`k(D!Y`YV=q5GD_U>t06{+c{fbl~B^Rkh_BlOMTeV5%gm(DF{7aNi!-{4$iK27k$ znR-CdxeXc(8vv%B0C=?DcDxd_e=ocMcSwJBIxY!y_SqTsmUK&LsWbo=A{9A=R)K=f zsDLWtLu3wvS?To+Pk@F 
zhO}nm<3A?;dU;d2vQ{WEFEJ~DB#I}?bY1GzQ1{>{a1Orv*ki#{K_|Be4OVI$%e#T? z+iEZC|Go}Cp`p3Sxog}t2*T}2dyxjUO#CG8oE0Hb*i%y27hd73ODDvuw$AYH1UrTt z(!ay>r{na-UECzRq?9n=lO!9QBHfCIu~anl@6&+7fcGcn+Abfipid?HyE+Jq9#FE7 zHyE!i&>6_2geE6ORiBl6!{nV*bm)*yU(c$~Dlm}N%Xlj#d9A8Dg#nF#Y?LEe^%rsQ zB;ziIk$tWIWwOCn%b~nYP2DO?@}IZjU;be$UeG4@u1pE9-65prT==%l7>b0CG3oUZ!k-Rt^h*rRY^ zD_WLv`8$7(SJHhAez&&9nchP-yv}eOwN6VfTXJWv%XI)gB4e+p5#?jr!fs>5lYATM zPj|%*vTt1LTI{9<42oz-`V9@T=SGz~<(I<0&9`wWdjV|>Z>jiKfpT2I)|yu4Y#Ls& z-H3l#mg7_8SeqgD=_lUNUVC$w?g^`LMGoR+FN`RD3G2&wd%A%d5z34r)`!U$)J=Y8 z)wi5x-_n%D)J4iY4Y96*Uv-SY!buY@i<}$AtVYTyp^rjKx9iZCg0(c`&g%T;i(u{- z#(jmM0`gy1#qBWdb7kjkT1{_;;W8=@>QMHs@82lUqJFGcJoGBVOf$GDiaPc(RP;By z|9S2*@X4|JGad8&UCkNAZF+~@^#L)Nv5Lb&hPu}Qm_Ql`#du**NOkdL@}EPQ!kJ5holqGCZHE7Ag9gE= z3a4QF_}Sffa6z;(6u;Lqyjlwj287^uAwU#>Bq3VF!@c(0wLzS9e8tzFGUiS`gAVFQ zL>cam>a`>UgdsH)l%8mtBh-FT>RA%8Q!m?PQPz~({yi|o4kD$`6mhIsTwFA#8Fqp$ za}wj6HcGlnw?R6W%RNIDKFtM?0AP6kRX`$-ku_Q;@*(82ueya`h85g~zyl%fKtHDc zwA*Y;F!I{u@|x<38*bFYz2)X}zBI{$TUa0{89at5^V2ntfQ$*?@u2lGh%;Km4=DLg zGFYgX1AxkSvHlk*Xhk?NFOzt&5QCe+!_>3Fqm3wn9fo@a%evH(dGVpuchCD^l>e7E zu@$~1F+EnO;{cR`r__iMLp!n!(5()-@|XNpOt3D?SP?&k5%`g$OAR-8(ujZ@r0_C} z>ul3DzpWiI%M;G=Q2K;9@5y6wa2qnvN=kI(OUjNT?^f!3+OH#ICN2&Dzjb3#-%^5K-aFZ=2uM9WIt^UrHi%jcQFu%E3AJNjZM>!? 
zDYGFXC7vUICt(h%vgnmfae^D6iaI%dW`3!wsM4#qqDl$VEx})2#xE&r%-!1Xl=ivg zEXjKk)D#U)*Wlj+IMe%+ zVgEGs%h{f{TRXY=>};N*mLTbI`^q8*s)gj^yY!YTvP|FTE9bQ}r?Qd|fHhMDM<24&qNB#6i{25W z09l01?)fxs(u@JeGKgEpM3jBrm3V|8@4x;2$|k#_D_M>D8L>w^VkYqzL7IAur;+qb zE2NTjLL~e*RB(#@0u^*qFi)VF5$*B!9xS7oiDzrtVQI!3_cx*HkxwI8bUB;O{XrJk z^kB8Gy=IP735=M}Dmuyy^`)y>o*=hhLii@yXPJUnQ7FdhOkGyUWIkBjd^{dqyqKdC zvViI>#PVe{OJ(wBxyz|-b}No(x@zJ4{uTj+*TT0Ro==(kA4}Sb-&L@)NTrqU>szRFd<;p3;VO6L<{mvuPkw-n@;j0nn7cWCRjV}1 z)$6jY3Kp|?k;ZCv{H$8s>2Qss+smJ@myrYXZYr>kSbmj6nuUs^w^C*X=68xVWXW}Q z_DYh;0q<(s_@g>UGB8X$(^Lnc&EsBY&qQZ(qPvIVAM^b|T67-Pw*8wurLbduz-$r|sZ4}5D1Mp#1#Ex&D&}1Vm=sW+x_F6iakauhQrL`9nN-_4RdCLA zyx89klTAph!LOdPHhzj?OrEjjm|_fsH;sfhjSVJq6iWvDzitDR7NDBq4N>&>KKGDT zEjguny@6W&Uo3EQv%hQu{9d*JLuzTFC|n;SFf@tk#w(*(3l*rT#qLReLdymJuzxST z8riUQEw5CRMNDlw$=*F8b`Lx&59Zs*h=LaDGcuB~uCq-^%-S34LU=TKy;yLhexae9 zeod27$7AxVn|f^MeiL(?GvrJ{oG-PmQaSD}Xt#K=Id=pKu*+;kg$^2_Dsddm@`{rA3{7!5mmvMtn|KXK4-%`>TFzC**$AN zun}&Y@IPn#EE{;ngNXhzrEKGJ7i;vc6J&pg7($Af(fZ1k!GL*h}0$Co9^c+p#eR6)nWa6BLEP&`cO+&)1JLU)mOt_MW#)Y$gJIq4fq z9Dnqd@8td9LK%MeJO=FbCwUqE!mbT?f$=@`S$<@J+_2=5RQAxWmNv|E2M3ZWJ3<}( zyY%IZ7cH|W?hbo_ZScbTb7AcRD7~_9{3%1IRpe%E=+7~5&&n9BD&LME(D8=R@sz30 zv%7u1PTJlA{SjV!T>e7B{!+CnMH9-VMg%aqH}TXl&c)GDJq_q5KEJCGBTkB!6nPjA zQ+-E$ceS?0c|gW*f)IVO=;IEe@&EiZKy8Ze(zi_>cfl7C{Fc-<+XxjeA&bJq`CUdg zhAxrJVD~TrcCX{S&pcJIM0AzfI>VIY%6o5HU)jEw4T3tFTtA0Za5o)U09|Bn&bM=Mt< zmnLvV=&F%A)eKhOTD+`v7&m+`H~XG}5O|XH4Fyt{qS}GPz0^&h-;T2Xu zg!_GpAD_U}NrbR0fH1^Dy9w^^9o;296P6Au7VMDm^WNm-sI@mO| z#o%Mq(QwEbIb#((OQLLC=QKt6o8+KmgSAJ>12h+9&(D?)&cUVEdvXb+Tr9H(Ux~8q zPA<`qOAD7uroL=tzl(%9^VPb)%+x7PI*N7U#AoR*N<>$7;L}xlt`gt=$+}nTTvnVu zn!`k0PGReir%>$tz$r|q&pBnMw@-|dBnhQCTxdh86LnTQ@;YANTD7Y7_&wf5<%v4a zEgmDXIZh+dmfm$I*U#u5)iIm0c~-^bP*WO8%g876?_s6VB(pqVy$B~7g&wwu6dr`@ zC=<)OG;0b*@hR(>J~^NyMA$MILUGc%LRrh8&*23XB# zw10c@jBn&(S8Jkwm7}a4^EmCH9+qa2U=NDIXtB}da$7TX-0Rz(4OKExG}La|nDz{* zZ}Yi9b7`qSQN5XYUwh(Wq-mG5QOU5OD@}04u=-}nfc3|M@_hpG%n+k4Le=Rf!a7pV 
z(oiYh7_`Oe>iGFzR@EJ5_wG)giU?g!U&BOmP1WVEw=6cJ3fCmX`eZKciYB1 zTJa;ZwTv6k(vN7jw~7KN)sek8GTOw z1?nK zpeSjus8QPOBW~9H!}4lP;Rp3q@d&RGYu+kG?GXDD_ST@oGa}lG7_t)EBCnzCq)iGp zzJgkDxOaHVknfV^-|`h`$UV+O{+NcVImhYV{E$;4i)8Xa-yCzXn5{|M7!xuzxgDW0 z1Y6B6PRS;0xa{2dc7Q{g9T_P5g<8MhzheAFKwh@BI*C=7ye!l?+Xi z0H9Yu0Om4fw=(8FBv%?r$&-pENST2cYJLDUA&QqGfVQCw+AIExI=uW%s_YP6@`4YX z;EQ_8mc^cAJTToxy4E@7lF84^C>T_xuu~T9@8}DkF%O z9Spt9e4Awq$3brX{*I%3*n0vY>m45=4RMRz7v);8aaj6d91@ro)1+}MA`_qIGgKH7 zkDZEZe-E`%JCONi;`m>p;@;homFn{C@;uIYvNF@zdBv`R^fVhUG7ZAxO{A#Bi0G%nshw*mD^!)6{*}DAY>-g1NggjW&hi{ zlHwNuTUYFpZg(%%mCJuyS2&Xmw#P2po=Jm`g>PB-lYT<8?}#`H63J5hxu9}PO>tcO zy`Yl(_kyZOUR&w^aIY9{Pzapnw&voMq7@dlDieHUAJB(|8k%-Kg%72U#W!ZWSFVZ{ zlR*&YC&@33Yt^}9QO9=|P`5Fvf8!ecDs1jDEch^|`a2B>64{9IbGYNK{Iu{l;r2|H zgm|bU-#}cdKd4QCmC;2;?mQm5_*$ucI8N9j*q#=O^>8m?94LHWZ(`<^dUL&?d;wU- zOW#+vU+a}e)rDMsq?*nC2>gkd#K(9Nr#!M|c=~@TR{Fvh#kx{}boPHJ)=8s(E7lKS z#X6*vt&0tFNAT0*e|lG`FWwcI3!4qt79aHO%4j*q2f74DNO`f?FaFiT`mPd?_2X5n zhp%dmye3mQIm2Ax2pYa2r8V11;q(i{d8Wi0l?_ya!TcA*S@S2vskTiwALWBc(Ffj4 zJ*J;D=b)SW&?%@qSW*p~kYf)A4G1dPy_3)MK|b>1)7W3`>I+0Ks;cZs{27(fCGZ)5 z^+sSr5uv<=m%`PIbteCeXIsQMQX>&dn^Mh1%ULLRaJ3jNv}{OSbzQW@8cGT|Nt{Sj zHNe46vq+4HF!%tCVZ#Brdl@C8@j{amO+Z!l7Ri)0V}yI?Un}(cBJNX_^fVFC`tiKk$V9SY67#z>@z)68o96eS%_eX5vBO^!p=G)DtyohDv7SXUA7+o8Tgf zU*Qk4+kx{g!t*{fzf#FIQ4~)Vr{o_>w_Tk=sIA_cpiyXuBa(}wO=5_kQC!ZGi{I7% zgjw=nZv^tv1Q0sRnV&JVcw^XgGPA((u5}|fdN4Es8`k^?9T0_W8tgl-PLniE72aCQ zvGgtb5WamUz63mo=D=_jt}(>ZbeTGRO(nNn?PRBPjPHZC_`NM$#G&SlhF7_9!94^+ zjYr&F7JbjN^E?v?Lc$#`@Uup=nEL<&dEn>LSfQt7LS*7St~++TPe8}ckaj&;LOJxP z1=k?8C3aD|r`-M>kN_-g(fG1f)GE~0Kar9Dk|IcS!fFs z&D>CrC8K5YNe+>MA@Qq{w1&+*j}z+#e4vJ?Y%fZnMlDy)hgIi%L?syx?f)<-y)p`} zUpDWH685WI_ z%5-S)Y#L9UG}{rmeDftBMJ>$z8Qxre`3kzRUhrF=GChn=f3=Bbg3v>E6Ysk(TcVgu z!a5%h3_hN4jt~d5*T6Ydn3DG)uEUpgza8!^?Q{lOyI8A*R+vQu#ji)J33|8G$FILY z31rvPeWy`Kxj86b6b!J!mOd$_XPpZX6f94LO_BIWz0nOg{^aER(Z9c-rwbQ++ozTB z!YxJA*Hog6Q4zP{=P_b+VhB3L+xFA=ZQqk?n6{@uwWph>LG(>u-{=CmXD|M5(dWc! 
z?IH*9Cwt-`rjw|QEulr1dh)9K>bKgUOIGpyAyQrVd_Aax{E(GC${@uWxTPM%3NAaA zPnXnSlG=v-<2QcEHoF_NtH?+c)dn<+e4PiSNbBDWb@V5c;HDe0pF9y4nL&DK!wDU~ z%ok%AI@aLMx5or`Ofiyt!#k=1FUljx_p#HOxZC6B1ME0dqJ6n$3%g5CZe#Rr9a{|A z?y$LJOf0GrP`sm3FiAxX8yapZV^j76&F4`2%dvwDv3+bGirhS{P@TUP``F>Vv3(fC zM{*#o2t_7t;9>_6yuh=m`dmmx0!sxH@y%LrE)f%ZpZMx2OctrMnq9+d?Y!q=MW8KQ z6CU<(QW#1$$E}De%Y9!w@}d(76aV&ozQgKY*2E%Oefo1@H7p_FmIT^{=*r6vmaNGmiQ-4tx7-duf>2#!BJzj_J?knc`K(Zbt;P1_ne}Jq?+~ zkTmR}#g0U5!i&>&)dh`Of%_!(Urw@FKUki88Xu;Z(d>fDD={ti>4NSnq*!Owb-3Y^ z?7~Fyxq&2C=m&e%Ds7#j0yA60Oe~`zWVu>=)=+iwfkzNWnq%*E<-MPs@WPCoBn-3( zWg6629@F@DuD*}I8cMzs6OoT685lswGt!{d<9p1G`(orw+c%D~h||AVu2s62K8zYW zEh4JsazQa)Jf^epx^-C!%NI zo0!CGY5q{noX(Cas^F-}o5|K3-+qJ48OEARWCSTII4ctVLdW^cz@!pX-01`(ZWPSv z)aHE&NMkY}q+H}CKE5o1uR*~lNmFtdM0g$9PVs6(90rJ& zmoh$ol0+DRh>~ZE$=tD_xJ|AU^Rt83idE8+a}}Z~S|p>Bi~D>84^1ufmQ8KToS>PW zzC$qQ?tA#qFvl0&*4SZoXMVA(JiI7;EcNm29GuRoF-vuy!M^h?0`pLXk0sLhy$|lz z_%reRo3P_ayfZ(`pGnbM(-?ww%}0APTf?)rxrt_qn62M{)11u_44qk~{^wrYILA{H zWdZruZ7zDr4E7O)E(g+AwuA`{Hq%XpIUkY=T~eii&P4>D)tuEm4W-+xIWnSGu?&|o zy}t?^NWcO=gyo&o}?dT@;{VCM!u`c#0WudJ8#jOD)hZuVTE1X6< zPhaE9J2%hT}}l=|o5Z`rMy`QGSDA3G#?FNn9FP&?$Q@KUxc z+P=MFb^VNZKFnbCQOsx0;CKT0Hi1Qqlu`8TmyW=}Aw-_(6e)_KKN9#@M>S4&1#=doJ zz={HDi#BTkdVv9Abgw-qpqWuF%s|EjgcOQEcWGXj zxKMzl5U^n3M=&A`&z$Vtc?kKA6#0r$#){8hCLQ_!Rmb;(i=ccD41bgdx}qpzpfSXE zA!CSQG8jx1=qpoH3w6xsc3aU?#7`Me{uS6wj;wGMO~$M_0JsiWWHjL{XgE|f60+8y zyDCavz&FSsqq2=E3O+!eH9W8nY-F5a_W(8+3rW5BQoDa8G_+t3o&}&8l!`>>6G&$`fvIw37Lw%Rx-#7}0Uo zGRv>uvLwbUX;HQQ{4Q#hrz6HTNOYGt@A|z2DK@pz%FrVEI}bZ6eW?LPE;v0|!#K-; z%-6&?5UK{i#(H=;BEnO32~iYKF~usp0%s@Z!P&`BmMt+4ma#uiiBlBKB0I+g43?bZ zSP|P5McB;gr;GZM{e@-CIJLA8QS9H2K>R&@*+*-}o?TTA{`I)cu{5HPqwi*qm?`W; zCv0ZJo`#6H821Uy>k0Wcm99Psz$f9$2}pTsdlwZln)inmACHF>Pvz)H{fO!r7}yyL zu4WgJl-u~^23_X&KOs}%Btu?pE+o~W{o=R;tSDWeDqeJyBd*m>LFpSS>HBf1qn|2< zZ#BPs<%<7kcA>&N;4_KFSgf<5^P>Z2fMD=GIBH~#Cc3AD`r_g;HGvQX?CoRPn#J6n z#Ie404d$?x`uU+~J{MTn4&>D!mBB(HTW% zgH(DI4wg{};AX_Ue{$(`I55}{=YB_+Y|j1Oo!V5u(g0yva2Lu;+rs+QD3x(h&hycG 
zrLE2&Px(jz+qzD}kz+3Ba_VxplkfZ8$|Xdp=rDUTKYJeT8)@z5X8XOOx(1a?u!}Ud zE`R6Z!Biw8ZBCJ;8+BzUFzcqYx{=|UM<8@0BtiS5!Q1+_1QCA=wcLL^T$ywnXDPmj z_L?-s0>foOncX4Z#-D%p5#j@{IQE5g+S)PW!LJ^gf&gI>o#e|%7@2^LILa0;eiBl! zw9`&G;X<+h0=AY$ds3!;1X$Gk-C9~VrV*oc>~+~@;j39ZM&WmwE1(bVr)IB!PRM;p zR!;pbS$RAtcT?E%_mg^*a_N7c)N5z?S@rGVDIpZrGcBjYR zCi6m?!tgD-hDvkcEB%`^mH2}+m2Jwgq`hYj);RztE#1pY|43TG!x8?OwAA7TCoR9k z;*}FL6FL?uj!80$tC)5NPxt)nt@=h<6TO?(D^7mPIhMOtzD+0d@r!j4&uCV%e`fOr z_~U^$aYuupd4c__M$svZlC0A7gR$wt)_XU>dG~@z_3)b>3}Ba7N8q(1%3Z>Wt!GDo z46e(<9;JiX0Yxx*u(6b#3$ImN`enJ~?{u~e?dFAOhhT&i7s`d)aF| zj8OSu;TVdz#?3m}AA|D4^oraYR$Q}~09#|v0Izlhe{^$&B7`sGJ{OU~&j^H4Xg_$! zQ5>s%R@&l%!x-WQ9tj77viFC-kMyJ78G`RVLhe8v;WwV8Cm&65p?IT)jCSB; z4ki5+Tb%7~mxS$310!J4LZak^ClpraW9K?2t%pjOGJYd;7%D{Ac)iZYZEgO9 zPf8#|xI=MhWyltlkUZm1sf$WfHh7R+W+0a;%=mgbinod&%!IEWT4u*FaMwrB_?Ee2CfHGnN|yC_p4G0TOeO{`p5L zUS6Np2I^EtV^nTZ490cEqQDoDp{&Vu#vSy>?XWum^QX}v@~4GB^o$`u_2YqLp5Ayc zoqCvT9TWdqz5LEzw-74xu`TP7UKIR5-Z21v;Hn8rSUyv!RcnJcR1w}b>K>32B$s+AbDhJ(2!K@ zGHimd35t0fSJ;P*u>7XN7+-<+P-KjvvsV(cH|$PX>&|Hx93Wl90YJ^WI>>j>Bh3n? 
zKd@@bTbIS$xcI?$aN@RpHx?U+5iEty!n*K-1~S2cyDU`vp-p1UV42}K99X31=82)r z*3oHXLNbBws19v`Tx|?j-)?W{h7*aL(2TFaPvv3rjvu?gMyXZigcB$Uus}*$eXs}< z-Kb*8DE4+Cf&hSl4w3|f5Xb{U!l(q`FepE|N8|}nQ}lkOe*=}4+~j9F=0A~|1OVuY ze|ghhVz9F-;x7*&iUBg~f)wRYZiCK*1#NCX4kk1RY~M%Z2{w8ICwF8?a3*hnlaSeS z+Q8uU=yQCEI}Wib&fqu3a@S#yt;Ah9Pv(9rR$bh^H_uL+kDj1LQS6Akh}?*kwkld1 zRZ!=-_)y>bvA4njogbLV=W(1-p%S$*0vt`i4_uL>*eZ|lk5A(IA@7GjtV85Wf!+hB ztmuGKEU|*Xh$^YN2^%i+L9Rzh|C!wfF<}jm;gTq5SiXApn63& zSkW-R1o#ydh)hE&4+@YL8vU*W<&RobP%Yp+FClXIFC@0IL^QUtW1ynG98$6?_;F=B za_Xuzr-cGeQFPXV-gC!;-s8$;z~xgP-b4q|*SDeOLnp(t)dX_+rHJ%A1ifv9XLE`{ zGtNH-CWD`NHm8?=nGpjpn+6-PI@xKL<7--keRU&Xz4RR^1btq8{pZ&ZxOLRg&zl5( ztsyO2;N+L}NVI!3gX9NY9mOc0tdWU&x_<#kOTPi6*LS_9ilzgal>#$5L`6`%3};PX ziw0L7%E>R*_<;n&u_ie$zYtlhFGgIgRS;Q>hDtA9S`~Zm;f3c_p)5jx!D~4lpY+iX z4t4^(c{?7nsqhLq^k3p;I(hy8-AP-BXxbv zndY1kFpl1;+E==cURWdCHjz+E1pm&Oua`1azpMCLj*i4|Ku=h7^NZyE8qiJwO48qi z&MfteNwVIGan~y0`KA5iO#FQe!nyPY`iqx?E_t?w{U?WH9|$1_9m`;JWJo;&ULcxI zM`}y`Q{IfxBM$GM;$}jz(+d3eqT(mu0}bU@0m@X)5PC}SkT(7OpCQ#wbw*=bZ|}D( zOkIOxN&H2ZKKT72atTtw3aLgeMr^*nB3uQzgXdQF7rp*3FRdX=Bj<$)o+XP#?2$+9 zFQp-8)p_5e1#d^`O1~Fe=bb-;OG6Y_>FPEPlwA8_X5}aTaFq;|Z?0wU>q~r>8!o5$ z7t1E6+K>3d7`iKlP343*hAIZ`uXI-H66nLkQ;|L zDLWzOyf9|)nfrGK$W*so<&IXJiEg5cAYP#e^RV7S%_1pMbHCTJA2%Km@lB-iotKvV z1R>HtRDVDm{ipiF7S@~hE0}%a{h#_YSXPx*vEKH0E4dq-fx^d4m(Ah*Z6v)XAx7oo zr|}!%8yVp{Obe&ak;h_Ou}XL9X_UOis(}}aPiYUcJx9KaA!i|?h~C_b`XCRv>lQ;;UKo-qA$o`OGo2%O`dXp04E~LCI6Fz`VZ_}Q1~G) zp9>E1pV248`u|3s+QJ>-|I_~w67hfbf5@(q)}w`9T4K{@v$>j6{uAB&2!%o>ArNuo zxZ5oMNrn{#2kBtf8j2)2BJzH}V)`B}vi}m1Ly^)BgR?L1`~d13_~aY=fP2{FVgvbC zO{#VUc8=8IntcA-IqI5uagNxPctf(q6k*W*CL?%Jf<1F z#Fy9zvZ9mj$>Ip1Gms2_af;3cd(ojuYeYkc>Q#ST{$`s(mcRGrZ?)+;wVD&L?k-5@ zR+d?ox(#0nP&;#I;xw+rZ23+W#YgYDjh@vzBH6%;9ihz=hgy`8mO%guhJgieu)C-f zztrIcy?a34zw4LsV5%02lgwg}DR)v-$>Fs%I5_g%%f! 
zIrqGmpI{KgYD=TXpdvNLAFU78T8vNO)x*l&%vF;t(L3|%^v%1<_EuGvqkPMkMVti+ zHeh5Pihrle1GVn8@GUomDmqhqLAy%3@8PFz@ssR{@m7^}N1UOkt)C)@pNxwH2lLd> zf%PhfuUHeFlZ-kU=&5}7OEw!kYry@VvIJqjMMCVP>8JVzLf1Z<;BYHmx<^c}1|^|5N0E!#h2GyN#v zbF|$`ELNqm6QAX%r-hz%sBZZ=ct9}iN7Y`vZ>MvbdvWxWwWP)ybPet+dIEC9ur@0B z8)L~bZPL9Gwg;zh*tVq1tLZDutqWgmxy7OB`?87P+T4h}J({V!5LezYYwu(Vc5g@N zx6oInCQsX#6LN^`h}H3^WlRK!I|Td2ef~AvcDsw8v0hQdTS~NLl&Le^wuZ*6A>G(z znN?i~9gO2|Ngj+NpK&FPn6Oo}lyd1LwtK`{kQtVW56wG5(qtv6MkjzEZVx|}xSb}T zlKiR?`g(Ae*T)g`CYxpNBBGK5YV#o|7YX55gU^cmxfJ+uV{v*fEzf}#)Yj^@=z z!h)+1y=n}^Sv+-KEC<^=40613uvl{fpE-_-V+m}uxr?Qh7hE-Qlo9J0D-HdQ7(6;% z6SOq(-lFm0=FoQ)c{AXdtVZR33sF_UJbAu0rAFo#zt92wvIg+jaskMGvRsnCd>k@hN zVv@9C_A+?suQ(`DlYq=SVkpHQSaDB61Z$|8kVp=-(MW5SS^}M_pd4k~NPj~6bh15c zP%bzECI^W9fC|d3Emn&*Q4RA zoBN79gojS~;gex7H}rrWZuJx6ThU8$m|*S_Vl+mw*@7^guCj*LRGRWY z6@*MI(V|TtU4mAEI6bOw2RXdGA^QEcan${sfz(HT1<{Afco+Zy_Aj^n!64>(^t zIYJYY>lfbYDGqww49jtzhzV&|oVy~m^b+L|^d1tPT^gVJ{;}-e0@QvNSawNVN- zY}gp&4sDV0@5 zJ^V3GzQz$O^m%q4hoT#N4#oljxKpoO@)zzddC+$WT#3ohA1~ayV6T7Y3-`Wd|H8eG z{}=aO+KUBr3}Lq3<-xyib#y5(;j9jKc@r)@&#Yn*%2!Zik`p@f2MHg)mGkSKBazEz z16NAP16)Gj!A-xIIQ_xbN7I|QmtUF0$S`o)<#%!3U4#*qNAhgfXh^6&D1f4U|DrFx|gx|bTc<685XjwiFHVRD2ud+2KUb`4Q zHy`Y+ko+4svXEl){Lx@&p_=>9ESNY=ne*_Y+34R3&lS;UJvZw_$87J#hQyBMXw|2+ zAtLL*?{5I8^pPH42jQp1p1q}du&L)fhE#6N&k+Y3hoa?eHdFsY%3Iw0y(sQ96e|69 zGhu&M*QLMdPpEed%f!wfyE>L$Li4j$f8mb6$vBZ+-M_2Fiq(VkLvrg4qmG6K+`o3k zp`Wa}YasT^L^PJV?--0nL4i{bb_;qk#SysoM&te)wYo)Q<{s;{)ow`m3#)rO1#wOa0Hm1XW;&?8aWeijhu9c@qBZeRP3|AkXzo|pD2>=-%upAEU=)Q ziLLqvK`99qlojP@Vten@xO@iZv;Gy4oA@&#H-vchbg7S4HLA^dykY>vi=exZXv$67 zK@QJ}A?x_8!PFvs&eaq`S0z3>;S?-k-g8l6XYsPaX_x>n+LC z=!8qlDoLxJLMekI9L}d{0iinBeoZ?u_NLBzFs?zqfBhPDvXfs@>wf0FW^(Nrtp!`B zx4|J7vy;Rsb2!6=K$UD6YgXEtLv^Tt_JH66HSuM+<`(xgokB>7?1fzp!uOGa=W!h> zZl)`gFa-*Kh7O~YI2Ve-i;PuDJmrwU*v@dK&>YXLB~HbmZ|%wpo2E9)bS<$$GEF$J 
zPFCA$+Xb>^8rNL}Bz@DO^UTz39pgMlnZb3-+r)=+&kPfFKQ1;6)iLnaA#%cF_b?>#P7iZu?fMHhZ?7T5bGutOVVSIa-FFmRJ?!3;DMU6gc>dmC^egKF#G?$ZlGt70|{8`?B-^AWO%2Of$oRmnHKzHUyAbopt>p5NqHBJ63 z_d0kQu5J0rl*PQXEyR>XdCOfvip@eA%C04Kqz>Ag046ShCjt01|7)fBi5%B$S!aobXxyiMR^o4*Z?SR4|2gd!1*A z7 zl2eQJNW~iZ1*;Rt3Zu^x<~5;SmtF7enV8PwG1iP@@vhKP5*y1U^{AYG)xmhTnmMKZ zO6pmD9=&plg4|eP`C>wMCqoEJkFq+j%O;djd&N$>FEBfEN^#R7Yh!oyq51Vb6tNry zo79s7k)Afp)o9-Y#0V>uhf1`gFm1qpO4Aqx1RcWwpj&ID zf58VVV0|oag}!mLbH)kj@?NLLXYY~}U5N-E3}v??h=l&#X(g5!(nA{fyowl}QOh!&+z8rr^t{-9_~2el4h zSw=Q!j>AKMa0~_e_-v`!lVI#WIPL}ehwA}ufbt>CiTDgV904d>HWB9F&ma@yy6nCn zKfAqj9b^2)jv9RGdcOt{(0k)IH%`3Bt<=W&fMI|Rj4Sepzd3*ja(ERX(eFH{Ce|3A z4*d2Hi|#pyE&=p-P(+tNiAW=mC4tH#f~QntyTAMwsoFb1|+u-EwhzmG*>qI^5imqbk$n`4x{e!6BZ4EFeuUHTu0SE#h zamNFYaOGT~@fgM(LQ!l*Mu@s@_?!*C#G3mf}_h=FfoC^z(!FxIBYQtw=mC2evVf7UKB{QIkBS1d~0@9+N%iw1x<5|9ibE z6E5q59xjWKO!6q6l4YxkG458CYg0|pf}8}S#`yK}fEIRA96s{*iv=;eag&Ogki6+u~p3 z>RoT^yioE745;p63pPFs)dN=v8w(JbYNItFv-&+37|K|fFwgRzbE0q6=@%^HbJS)u zX%84OVj;5G$VfL>c9qz}?zcWFr*GwPYbw@J74)+&v4fkST88DD$vw_fRdrt6d2?{^ zhcBgF6+z(YN3mmWobEpsI2echvB-otdns^O^YrOmsQ`B$6CbqXpE%lQ`X24BWc)SQ zIgj_kT+)%klktwxr{8fiST$C~GR;d#gZm(?K)Y=@R3;|*hZpMo&A`$L|YF3S}&HtD}O zRu1R*KiWMD_xbY`Bwyfs>@A4WEOduMf}ZE&HuNgJe-z8ZA4`Rejsat z&YA9mz0K3E`(6~t_}_j{3{fi7=zGdr1MYwP4#odR-SKJnhu@*C4SkxII}e6TyU2iAYs9n2O=KjaQ4e4_ue+`;?*UG9M54=+L; zdNX-(dN+@jX)Jfbt@2|0ui~K-|9@~m!La@b z--rcjHL`}fv%u4Q)BtgW@U6uP`8)0$*XBAQ8nQAOPn~`bbC>j(%Q`g#1gVwtokS@-7>7taVO=%nu{0Qbg^Awk0iQBojIEy$>q!>3767ht1uKvpFm% z=;8MCxc4YGbFW9`aMbxaZO#TINiW6dk51b4F@nC@bp1gjPQz;1^EBuThRTr+;wnmZ zWJ4V(4kg#m6C{%O~Dm50)b!PkM#+So7`$R*zg2;ac1~$S-xLq-n!dWe-bVa;- zyQfgan|j%5qLA60_#Smu!YY~VHTTGv$8Q7ao8_a|8k*{}JidKrvs6R3tbyvf#MZ-QSSrfV5! 
zi8yC1wB4;!P@Z70N=Qz_#DT;7XWi1fZ*L@V&1{|d0tF~Bt&6DsZ7S!z%KTUZ*~^hw zN*4)O%%w!0RUItM8d;xQmAMHYwT%ub>Xb)ApAe+7SgE0xag$@*>d!%*z9_d4ik zz*eB44B2JdE^BA(D4X-jPjGb6I|W&c`$U!51sAwnA_gj1v&@}@wdov-3DiH(+=xD# zt6P{t$1__^HPtFY5CzP4i5N@tj3yYd_3k`8J1mUgg34&D$oo z7k0f>f^)jfgL9g9MCT#9^ZFd(`S@|HcAMP4sB1a^4hzOGrwf*hiwNlSi!ZPWn)CAe zE-z!?B@nsse3Covrs8iK3DB?^beb0b`>qW~+&|2)`P=H2TLo(0XdT%inXkH)N-**O z_2gWxaCT9M(_VxhI_pEe)UIsbuI#TPi=MUnq1NZ(Bl^^XZ&u-5Hqb75?Tsi`8%sr1 z1VHJE#`B3{?&H2yZr0(C@4=BMW3COs8J!NVckZqoVWlAFVMt_}^v-r2Iwq2^9UiWz zEKMKWL8w=~ z6kYBzCK83He-GjgB*&|~LuZ1r&x|l;j5~k)u@ZU1CgFU7!FHb@vwY|2`~|%`?6ntd zIAO|i-Rjc==Cn$LPfbP%aB?oN$}Q)?c8Omb>YRcwl`H77PuAf@L&VPYY0;Ee*dewb z#5*A~Ll_a3Q04T`4KtL#LLSE?xr(;)wx!P>P{|Yc(KlyX7v<^v%z@B?%~6$_0cBe? z&`|=!WDL5z6SiN?ukw4uS&k@LpCOFQ<3iVmW`nQiLZ2f?8x;LD^Y;fkA;Rd&>D2nB zUa)b@mtHc{$?uB2oW?dt2SQn>huI3GUV&-jLL02M|6G-$1M^;eZ>IoXr>T=qoZzEK zq|-X6cjT3>n#%;${7~8=EwDj~&s6R%41*f|((@4si?LZcNN_U@DtS5htO+n7RqZ>L zD-xcmWqOC~T2!1GQq1b8z-3C~ued}T2X9kd1q}Y^`?ZZt!>*WZY%XQVY%njPo7Slm zff}gp&)`Oj;zNqYMlSP~`-c*BQ2r#OU}|ARM-s)teStV|{nqO!vG|tj7%I;{V;(KN zyExyi#ZG?|Ja7D?%CJVpAbw;^Yf`JKp?d-*vED6vQMh2`EAD>!h?*G zWW9j`T&3K=bu7+^1$)e&iPOdxniOoL6qTz++{k$NL8|o3WYSV>o+laNB?E7UQpR-H zl%+|Pfx2cG-pd5$sZI*hS6P~ov%~kT&eG=S5&b-I6XU*B^ZXOZilDsQmuGVD9r>lD z=qc;N`Nyu92pRvBU|o2t1H6*n)mz%R!uPZ+4i~QI8##fo&tGzPw&}pYU*7Po5cYDh`>sI^KgIB^Br8 z#YZ_SkWl|t&uj`TsWIF*^L<}~jO5)KdcDcIAN9hgz9+7WlR?ByF=0OqD<|3MBnVNL zHJ*y%)Y~9%g)bn*Q_1_jRxl&`*1UOH0v5o(MUzqPCiwAOFta^# zb#&az^R{ip$@>U*%QFKBiI?CvXVhA4yGuGkbtrQ=I*Ar3tANNS%8a{J$JQ5rSJ8?t zEOK#&-*Glsn*pC3NM)%`iH>2A<=rWXbFN}!T2T3!D^%$^|4@t>{nRUAi++dFvQBBj zv}PI`tCG5Orz%?;8=WCgD8f7(f|g`6jQbh4gC>w2o{ik3%L&9;b7#B|DKq=*C&`_{)U{k^Gd4NfMkF0;!nO` z*G(ZjfBu#ib?;;fBY)$cp1;WGdNg@HP=36<%((b}tFdD#R36}o`#pg7vj#obeAiSI z5hW0q@Mq3@2ITJILS*bPm6wJi1gqyB&swN_KFC*K&keti%_|3uL=`Je4-bRaDY4;7 z*w)k*@n(@?>(3w5Tc^lNd;p63B%4!QFCvY)pby#`2^u1u+x;#HL?LsfJ5gNk;nYH^ ztC^>6{6Fq56f8DakdfD6%^FmD9c`AtkM-|cZStSs{WYMA7>e0OKBOST&^E?{IN=p#uZVs|Zq|)ovF}WmLbeWyv9y}a2r-_*TqP{( zT=67{O{xXET}H)?_iwdL 
zi%NE{CA)F%m1zTol}X*AGDn3H-_3;^^KQr&PJi;CYM8=NAFeMr5U@x zTy;XdF&ebGbsQ1k+)-^P2B}<- zkUA^Zf^TA`E}rm3B}axr^W^Kx-A4?w1n~85>d?fP$Be`hPfDa!@_sFNaTXILY@{Ne zFi&&$iS+q&xia>b=YGBdn{T7hb^b#=E^ln&CKSg)6!bNPfQOr_^587%V0Fcg$UhNbgO^*sL43h(bSAp|q#?dJmPRtw+{F zy8I1^X>i8%OGN=v>qbCXWxMRF^*J;eVz`~~eSSvwHXmxqEcQ--caR^NK=4==VAZaCJcO zT&0*6(ghri7VQ?#I?Dr&>SZxqdL?i#qir_B>Eig>Bmm)baZ|r1&@|~}q$y>YSvt49 zn1-JRk>cM*6o5UZf0{n)Hqg7GR}qQ*;KX|+r+NR z*qr%vuai}TI{c{v5s&|7SSF*A1*o|Tw ztX9J)Ps~mEt#YfXn*#Z;oj-sM8dsEdq7`q83QPx_=*B$qb@{K*RZ=9vHch2Q6mbO3 zJ;RQL`g8nmjm2D6JeC~BRrw%7>k1-knSXb@lx<3Z;d(ZusX_kh;e@m?;Xhvmw(2^$ zQGu->`*He49Cv;58c2TFZQ~F+eSl40Q4S~+x(YEJ(nhfb6so@->a+6G@#FnNBkjjg zAT5C4Xwj~3{~Q=bOPuXEz?{I5bKC*UNCHLwb26>l~1`O;t0hWt+5#mE8Ea5d`yFA-g2&FhoP@veGs#qkELoxqeboNQG!tLb#fE@d2+B+&u!Gp z*x{Lf4z)yGI|YQB29XH<$)UucqUG17&$@tYH9idW#pD7-lQBWz*d^Qi{Zkp61~ zRXANxO$MR|v3$c&x##%IXN2*7#LVj4s4PmaUB?<~ar7M#R6pP7_tQ@v#{1OI;lgio z^cW{*)n%{u3fE_pfa<2;M?|^bLzj)i?88=BI5)8Xsx+ z^ADZkH4T)I+0lljpJ`usWY#|Urp1dHSQ&NzKp9m|2w#hKH-7uO=KA!?tLD|{1i(ZZ zjPoZUpT?ZXR;#ll;NePeXy9^Tx)jXh!S?*RV8XXQYE$*qm;3zlV=Tby&x;&M==Ud% z9`$L>!oL?kdg-Za$Lm6DN7|G)%e)qm0YMUjq-$%pA^~tC`v_}mDEqkskUUb2CZ_E{ zA%GgN7rJxkxh56D~?*(J?>mCGRhlolACZ1iuRDq zxkty**zG{8Z~6kOAfZ(d+YCxhz=(R)z(V>panqb5$sFq)Z5`dmG<8^?n3)zia`w(M zlzQfJy0|6I?s_^u+uRD0N0c4M$Dzasu1#$-l7sJO|3$t%<32taFYXE8=e%6B?8C-} zf5CT!`0>kX`jUj1%R#lYlkj($r^0=8LE`=H$s2QhWtUCT+lH!t%y(tcdbR$-0&TX9Mw8IVa)&U#&4L*t5-Bb3iad)3=71%>c?O8RF^x&y3m zTO;2f{qC+U4{r{Ytnz$ajL6`kgk&mrRfAzIGA6w9Yass`TBY7$+Ej8Jp-Oiuj7=IK zWyeLEo)}6@eW@teynKk4rn=ko@6LU{+^Q8JzW;CN`|%-HpN<@ULqjo>zZ7>n8!QhA zCCJoUU5+Z@7sL{i?{j+oBHbQ-J_jJ=l0A5%7`lJcnFx{V!Dyhzh>WaO>@?gVJytQI zEUYZiy55tR3UOY)_z&(qs8^b>_?LV4(*GCtYDfHHJ$__voN3 zSf6k2>`M=L*X!Y9JM7#^PNTK;7+oX#2|hM1yua?|LE=Np0aAGbvX$6s;x{^Dbapc90xxK_ju zqr71lr>zTdn*6}fU4IZptYzFt!qlg*GjiwOf?v1Go1<4-xx;2!XN;|!#ze4HPZB0w z+=TD{2hGsX9(wbp{iDxD)8S6v!95CiY^p~z%78VB@qN!0n^K>VUdrZk&=2y&hQUIf zk*S^7fPMyT16T1VW6sNtCe*qq4jy9?tEj ztl$Gp()Kbz`?(P(T#}kmv(ytauDfEf-tPolpWvi&^2)#0daiXl67K!7uI4XF7Rpwq 
z{KJU0k54)sZLteHj5)Qg1&f$A5|>|#D&L9a%tPiRl{17ySm!K83HmrWqy~11uQAs& z4CUuiXVCM^QpIQeZV-~fs@YLdUN*@QoH;eQmdhQV(r2>HJ+EU>{#a4psr#*zT&E~! z&k3a>D^<-2dadjFd4HA5wO#Pa-6Q2fcJe%{ZPkF`*EINP9b=SO=67syX>XEZB;4D4BAb|Z}H#BU`>p%WSd&Xvdx!| z*`B_0R(peVd$SV$nMb=*x!|{_n{EgDKxLA=_veQ{icfMakF2VQDSIk|Pp#y4eRJhI zi7w+BgJg@50)`qd!t(kVO1%$By}dGjE?P;?%a3-cPg-no=El_%+tfrxcpXg2kQk~F zXsdA~iQ&=G*&W=L9Ano2mcJY~JdCZSTG3-LQoPo+NLc}t^2x`qokskzr(v}qWP>W4 zdO`-vA{hn79rdiwhpQJCIw`7Esp(IyC0d5t!{L^OIkTXjUR9ZkKfTx_Z4QFIaNNLz zg!EpXF#UU{>2TpzAhUTX`}Y ziLu~pH!!`=9Cx$~O3`OwK@Z{32tXDTo8iUKCb7$#H^L0DGa;?kORn&ut(GqT+ByxF z`ksE&%EQ+_MuO!O^pa*!-j@R9MvOnxFzISw=fOQSzpzvVY7>bTy#T%xU|MN{WtLOV zs=)oB_HShLS0FHkNENu?E+CY3bsubH#-j%>8(iuZSa||LAOA_Nwoy0%sul!4`z(ri z)Xu%4Pd1YMEQ-4!TzLjDgXt8p3`y!+9z{GQW5PSr@C$7<*@p6f@@iL~*~(sT*yoqc z??)6>ACxkXBt9q?fe(gLC|m>AVResKjbOf0I8pK+V7(W3?aJ3s>Hhu!CMsaY~?1>7vS|<)Gl)G4MYxVg@#);fT>RB|% zaIL1uu&JOlha6Bec;5OPXK>?k#&+Q?B$<5P^OTH1?C|I(q9R9F2yR;r=LmgjB{0`^ zS4U+oU-NdD)WhuT`>GC8M2|zQ%-e^HRtlgzTs87sna2H~;Fdc1=vBTYgCfS7tg_+s z-8AxYI4Gp&NU=);RLJ?s(f6exO1$BKhtO()%^#MxfkBLZZebFYg5e4P*^G;W>H8bE ztY0=8*v><^wgbLuHy;6^a{aFgnswVGW4pjR>Tp_KV5ljfk*jw9_A+b&i{W zoFI4to;m6zk;}0rZuRt=l7&ci zAymH&iT^OLS;l^gkPPW7 z*+9!hRxigtRQ|<@T!6<3iB~^`J58e%UkvJRTtp3PowE)0$c+IVYvvbO&+$RPo_t;K zbk0C1i<2s%YmsH%zma1_G@8PX$gv^2#Z7uBil*|p)pi}jn+on74M#f72m{@tOyCj_ zQ}&0q?1ZWcbvnw2SRvQcD!fp(E?PwrV9RL*$yy?SoSfSR#)?z@2fU*sjAOL45{!vNLEA@L^$)m1 z!*{w?Tqj8XkCe_ZYj4|X!d~=3Ug>*@+sZi~9z$Cl$RX4OCYjM6xkv=q&VQ=c;5u=n zJr-t!K;q?}qn@7>*B4r-yx5FB2?@&6)QyA|8|MBrA1X+9VhUTt6*uvm3S~omNkiuL z$b~oZp;J(NT>a1~{4@=gu%rI-T2ZLdx~%+*B5bb6G*NyuGwJmQ7j6SBCT+2g1p5J~ z{U4hG@rww@!yH##1*U<`lcvU;HgmVU&aL5ijh^`F1@(g6vw;I9_(Tszmn7`FViMDD zABo_zYMK007u`IqNv@`GzjQ+%%&wg8q#+JPXN+quZKJmQLY$+emP7sN(G~Gi1|t)o zCPh}epb5dsZ?mKuO*B5rQi_;*Dl%Jpkbz@wBr#OlGDq{scG2M%c?qzl)1I%?wiH&Q z7aD$JHfI3P4xdWQB}m#m8I>&MoU7!RbVA|C(B6Rm=;lqrp*sP=jEk-fcaL!)ui!Dw zUCh%H`9r+0xe)x1c){Qw@q+4yctP(&yinPhDvvCBnj~MwBexa82JQA^w(sCkL#9+W z)X*I_4NGB^_UR9UV?q{0Fr^@~{m2h83t-O7m5J9$7mX1oX_tOx6 
zVb_aP?f$i9=+8pQ{^R#F_tPyOv#kqwa~lhMXRql=4<)y2M4fpqM878?>I^CCR(D(zI1_b7Q+1V}MX;`z)fB^i7v3S0qGb@e?$1 zl4C_2Kl$teP$v($3*sxWBnuFz9u>Ba-jF1pPgGkbNgvBZbv zIL4QbMpAZ3EWV8_F@{{mfZnke?@C5gijIAE@~Kfa8gmMRh9qkATSg>(?fcl5TKyUK zhSi7JRLY$r=txX2_{WqefGUZznKl&NtVAY0yF2WSw3ey{gK&?Qnx?9*MB`YlF!l|< z74@&>TklXGSytPC`7A)jgr~-NAAIW=#CE%}_B1#wXO-AxB3;JE+uaOcSp%grfwu>lc9^D zAngUxyQ|`L{FbK&9wtnn_Z057PfKqGH^ZnLki)^m3PVj`&&@ANc+c51qfb?F9A-Zw!fWc1&b54Paa`o~9Bri6V>rGhWI^I0(kx zFr+mQ%I&tz#NRPd2Ld%7y%!P9%y;&Z3YoS90WYPOVC)H9#_h0E`MDmQvJiOHt{7!J z?^cvCFQ|?{_@IW6?~P+1w@*FiQpPgWE|td`x$IlyJM!Gj7CN8q!h2A-r+`%A#A)TE z<16AF6MC+5uGV(0ynaW<9~Ye#a-l%4D1LVpnna(Y=>^LO0{2>_7=YuS3qbFiBgtl9 zB4?^oC{|&b0RQX6^)LJJ;)ffb%{iz+rcXqh`HP6>F!XID4XK@w1XLwTEWc>UG>D$Y z2QJf35he&l8MTw{X$ybM;rJ0*7<6`YQ<(-9dg(dIX`&+%DLUVfj+;%g7&~){4@?qn z8x6KK7rvt1<#COIx5*j7@Ne$mZ&XuXM!^sC?@Jx84*Ty_<8P};5BXNQxh+#yJKA3^ z@Ny2WMNGQl$NJSeiFEeC0=_>}!4dEU{aWu1JBU)m7JoY!h>xu!AEsH7ZZBE!1a!z* zmhY-Vn1BBI+Db_FL?4JYE%CCPzV#?&Hv!=)TJ_X~u9A>2#?8CL(Kv%eDHvgiZ1bfie2A1Gn}xX@PVs8THMJ9G21A<4g=4w za7?D?*Vn3c-B&`&SkCt5!nrp5AG$&w7evivxvQf_I4D4O)bc_02e)taPtV{{%W0WA zCXqLdm4Fc2*Qul9S|FJFx^nYf;qP$x7m?}{`>JmJrgydV4lU&Ao?lIG&O6$K9J@Js zTzaQdz9(bgt24m@W<8JB!>#XNIwG(a?@{x00G2YxlZeYPOq&6Bko%$~!pV~vlII;F zoT(DQyZUJ1#S;gPJ$}qP)*pGYGJW*9&c;`xk_3IR>aaxG0LX7H6G8DA5v^w=5Cm!_ zTp18-GUOpRtb23W-6P72@dQvDaG$Y0{)`2rbf5Crz`t5tXK(V(wsR!Tdn6=lJ_)(P z3T;Ytd>$_CPegdV03Ke@RqVl6fr5>gPMb*}c6d=pSS3nBJg>D`e%z@z^dShnQN^vG zgm9BQ4a1sZg?WdE0?BBD*Wp1l*!OGCU!+9w_t41%kXncJ3K5eDBz_fmbD_%%hwlfx zKPvS~S0x0_Yzh(a<7KE9dsBqnH)Dp8>pG?YAHvx~MmY{-=4RO(?i~6vbj074Syd^- zf$2dEPa?!5mUrNHtLZVYqM!#OxO=g(v1lB^ZAOMulhB@SQ~vqe=j*48@m)Rd5cuTH zL=xE9k~qVTMor<6Z0R`a5D`zLKM^g@*{Qzr%lJ1n%8Z+9>e}xS73X%pz{IhrxI5kP zaCWZ`(m6$MA{Y{q7K~=q89z^;oA@XOxLIA_|zosj1OBh z`8i<7Uc+ExsW}UI`*XXy{1I`9&8>x7{zw~}%bIJFdK?naVP`?PiWuGbdC^ zCG%73QKsk%F~%WpGWuY~7_5r`{4*!N$xl5JwcSPfXPoGv+UC`FK%L0=w=l_DV9u;V|EIJCI9mB*=AL4{NBjE2;i*$DbV7A#-SpQcYJ zF#FOWEAdP{sSABv;zGbr*n3^a%Dogy1Q8CR<^>V6UZ4sv6{GqAA6sjfBtjrq4;-I?0# 
z*eOwgY-8g12O+^T?veiAU()PjWdun!wtFGi+h31qo>G}IV#_(M6^E04bJ4LX$Kn@H zvR8hw%ib>;Fx1T5%c6$B&foR{nrX`kinKii)|A4t$yKC( zUm(mlag)@QA5=2?quL@SR75nd(jjKIFVKvYTq24H&lmmJX0EP8LX#7a{H~P7NI&l0 z?%2G#ZTaU{EF)j^-0ig8PuL!3RkJBl1qOzM3!3=v^Q2f%VBXrFtCJ$*4EPnk zj0(XU+_pGck@wJ;OZGo6=>j^%E}`mkKArQ;&KZDL@9J(m{0=eOL?y5G!t)cTKf)Qd z>-Ay*yt8g=N9<~s(F&5g7@&wy640Irn%f{S{&ADaCv0*^eN2!paEvPs(erhj%FQcY z!R`p^`)_1AsCOdIfdm;?LWyCMYrjX1t;kWbk8}@m?Tc&@yQNh$zNt?KoY1RO$ zeN$$1xpIF{0QaMhsmw|e309Z-?Xc0e08^0#{_BVN=DLW{B`I^-17pXWCrkAyvI^WF zlY?YaoHzYE#+e&~QSxc+u>}6}TY-+iHOnQuvPEr@-4F(Amz?k`z*{fG0O_y%jZ5L5 zVF8m@oUh}b8&K@<5)AOT7JC8ZKG)56?{lk*Tj^qlN6kj zozEpEA*bimx{*1~e@f-;O1`0sfSX;)akH@&CS-!9U?rxJ6RpZr!X#49e;pwO?Zac? zt)}T(2iYq+m@C|S%cIhV+9+}df9I9L$q8PUC6}L&wSas=I}&~Zw!Z|# z1sodLPIkXuOm)c`g4z4$ScEhEdb!_B;m^j;rdVE0X~Ez_yeTrj{Dx&b{`N=bDB6vU zg3gHrm!q*^rk_RIH-dk-qBJqy+_0)iJgV^gz9<{KZI(}w3MCahAy83 zlaN|of(buKpTAE@=j`7E?6HP!kF%a5`4MTJiJ_?zeMvI{v-Q}qO0&@toO%Vo~N1Yc<}RR$M)KE?2CYRl2bqgX4A8b%^_& z94wJ=@C|}-5A4V~F$Uh=HlF|IL98$yKt+b()_0m2Jtt%_CsI6juprj{^}YjN;)(mb zb0VyH@*Lz=JOsT@lr8MGajHRBlZ?FfgV4Dv-Te9`TM3&&ZhwYI$Z`3SppTLh8Ew=ks1p5d|7KLq;q zSIxr_bzWlBc1hSRXXP}2@;6Zl1f3BW4K*S})%{Jx>ZaTFfF87g{G&Bmz#w@qw2I54 zyD9CrCV$%}<}-Mz{dZIAi=EO@1cmx&?QD!P$pn|&l5tfA^%gI?P4O+sZx(Q)M`545 z77Cf24!&JhDFS<6^Sx<&V!r5fj5ZOA9emaO3Wx3Tc8{+EER&;Zh`tp`i2BJiK-M4l zmDVnHH+@syKJCkP;Zd+iryYq>&h2=3s}=`-GM}dRlO3BlP!pS!Fe$)tNx_Je7j-vd z808Nu-kIKwW)Kf`Nieg7@V*+qQAk$SE*vZ@O8YtGHxwH@dk~3%a72E*B`T37J|2^n zlucq#HH;~}Izr%WJOA+bcp@_~uTl8)-k}XvJ2v)b=HPrjbtpF!CPwYw%$nb_Ppm(E z^CAJM`QgCTr@Lujj9_+ZgqGL;)_YSA_G_Jt%DDMux*yAFK<1BwMUI2k4dh33|Jo%q*%*D!+ouY=!91eLd4 z>7MmGAis#^2%jru574AV<9V`=WYBkZ-Hd>gQToF? 
z+d~g&*zB=eV4kSg|Eo=k%h9T2c3HqRXw6Ke#ukPw-M?VxfDn5Tw(pu@?W4>(Uq9Ks zK$2H1H8kMZGowS$PajL6O@)edwi&L22C6P5GW@o^t)Mp0DwCkH?pN4$036qLh4cWv zRhr&kmOLW5MbS1N+|5OjVnFElcWV>jORMU^=mBuC2TR&N)>vjSuxSXW2x{N+AJGnff+pa!zdr&dY@pU&B zNfzLB+3>sZ!@k*T;Ce3KjtJ!Yk{WrRWyc_=`IyW7PWv zfv&(Cd5AQUXlY_?E%0otrE8L1@l&};zuJ7vp#^0DM2pPyADN{aPGv_$s8Uf<`hdFrVX=h z>|2*@<#Q2hb^c`^a-a)7_k()>`^2DOU-gq4^Pj4qthwU-tfaZ%;J00GPuwB^YNEx= zKx`jU8eSZIa2n_q#= z^%NE2WXjU7NI~0{p6H=2o%@Ui<8j|L8WDHVjk}3TrSn6}XN`GpLT=SqK$=>ygL*rR z`KkFJfzRonWMWbB?UAYng|7xGeDo|?NJeSPl-;KSu(N%-s6B$3Qb32wfH7afoNWNc zGFZGLFG1v0K!>0*6_-LNf{pf1(ed)b@zTMaOZ}xFpLcl)eL@Ada2ud--y24GhX0y0 zeY2`)zYUXzKFmPG&|Nfm>1;N3tf3KGR-@#?!d9A&Fmz*=#}PGI#jnz`ZV#72KWF4s3a&qW*&4@LT~ z3u3_+U;CyZ>_xR^*zKk|q!zwUfQRH`@;_aFsHzcqPYiUN7h1g1T~7*f1Wvn6h#`bv z6&z{}zcIS;6o-(V0=*U({z$MN0{4yiCS6=R`SChjF|aP(ewaYA>;+cFQsrs)eX@N0 z7;7NTbws6BA)}i$M}BI{F_%%Xk`t_7_LrLm$~0J~{tM=NoL#WAENPV(_9*a;T0;BT zfTK0Lw;4`7+N2zSb-ecjx#rZG%j+mj4^Gqc-0_ck01sfx^=UzX?(2y2N4ae|w)D^9 zuthEbI=ppf?pgu_(zo&|-;Xk$ve)3YAiO7`KC(YUpZjW`A-;dT{EmJqdyr*OisV!! 
zhGBxmgm1p+Al#{;n=pCs1Fqqz0S$#xeav=m06#XQ@oCd~*a?T?y%Mu?c)I4+3Eu7S z?Ud2tna=_L&d>&C>oLr!Jy{;RvVe6#Q^}|&MO9fb_baD2hVadnmmfpQx1|2 z*T&1Evrlo(AsG(V^)9%h3@+#D_ndB%5#bF?AEdsQlYsz;%H(TSnj z5h|Zwb;uC?i5&|KrD;;H-8!%i&Nnv(07TwtDPXLx#9yr~gKLt&@W@Xh(lYRN?+>wp zLjFNScXF~Yx(>y&izV65cG&LyM%OaNW&g!)28FGq0mZ5MF-(y|ZZE+@EvwQ)hhqR+=_^ozk+aB_5p`u7$Jay5x(725BpLWo>lvsrn;6-%InSLX znWADo-%^wC6$+9Wqm}|vxBzc9qZjHceKIX3B{AgJzz|V2=Sx<^>fcD%1>a|&$Pdd6>ea11L}zUXVQ{mSF@Kn1Uy;Rm-b5qDqM(sJt_X zL#Q!lLbQjsj;^k@9N`1$7rnQo87&a_MKB(osfu`>(9s>- zW9+<|P$=0XxfPVX!gexYH~9RK?a|i}=a;ocwyobdPeG@J^!zcEJ)i!f<4JwTGMX?Qs zZOYddR@{Mbc@lWQjaeC-O{eiy`o=Z6|Hy;hKJpX^K+4wZs1{58`#sNVf&~5hV3Y>|HAH9x&1@F*rsqC>Y@L|qxS|;%Z7iG6b~$>BvLV>}ws`du3bc;( z9&H9tK;;D%P*h}QtHUEc7%j9^v(f;sq& zbAihBH}3uF&+mT&=Ls0%&>^L;*oX&JFt)A9h_Eb%3K$Z&|?FPF~_g%ywgL{l!L_{i8#5S}P<^w5h%1Hx%9^){;$CC1}>VyZ{OE>$GC zJx4w*3DD!|mQ9;CkoRi1IQ`1Oq|J2U558LX3R%F#2j8_AfkGh zSI#|hMK)PX*uPqu#o5Xe3+KSx^I^hvuvGiq_8}pdFS5ahkNJE-U4v&{QCPYXn7%|s z%^F+vnTTA`5t#8O(XZRE`)gMEvTA9-U+9#-N1*tKar4p_7o{ZcT6nl56B$eJDUt&h z<;5&WNTWBM0;_sN7418$h{b#faT}3AR}dMf9Ct4eM~B2_>lds7O?hTKkXgl|nAko& z^E%uJ>qWCg*N4&U{k1g%nv9L8Y#Fh5aR5$9qd>3cBEHm-RfNn)+(R(+jXiDzATDp4 znUG+Eic4gHt*`mLc1*}D8L@E+!)fT}q^74ui^W8ep{l>2fPkGPR<-ed)l?7TUcAnf zW+f|LABzG1y_nTZ>~leiE)Jn$UV20#1<`g4(kT<|sN`pY^6mwtaQHEmLCix6$9e8d zRa)qUrre*XJPE)Q{addgL62h#lV2LKMRKujm=9WwOm^u8bC&DYxo2}s?>5DKp=-}F z-Sg^B?>3y?KZU>|C(Vaoo*V3efFigyq34D0kScss-0L|%ft?i78}3;f1*%T90s#JX z5y2ZyC$&v&+JK4lP31< z+ORH36`CBjg7RMoKCQs?BjCZ}Z^1P{zU3S6XqdqdaAM-JoDHx&q@DvD1lhX*G+qpp zUjze2ZvJc&HhEM%I8@9K#@`n9x38OFq>)US4mAJeCWfK<-v?pjPf2Uy5ynb@4$%UB=v%} z#;i&SEwz-p*(|PFgA{NZ;h7f7%IB}QDUWL75OTr~i^Z2ji(BMM!*2|`X5!&nHU~15 zReO?FSAKqQYF0&w6*6xxkwPjzAnvc9R<-ELD=*rLf)S8Jz%iW~cJ7fz~)1^!z zn|_WpDaoiAJB|xfE52iPOt@LFVU+emW{&y6%#=w{QnhkG({K(ok-Sn4WA1W1a!6u| zd^TM24%@7Dw+DL;h^<<@9VRzfaD8yz1IT!V2`|cbJ8SbRzjnTr-2pGRyY<}wD^kTo z0HsB~uBOrjjP*WD(y;#fYqEL4r83-30xk&APnD7JM|T^F51k}@q86Hl z4apzz8Cz>54sPT(IJX}MKNcPb-8j3|>)YNFw-4zk{h^rh{m{0G0{s0RB_N 
zyf~e&pZs*@w}HT14fk9YXw@|wBx(xPHDP&(zC@7ri}D9!9%2o1{q&DsvxW(gDwFFU z_U6?N{XpTkr9&KBHwQk$D|j??zg%qq)q&+d)~y&`h0yaH%(MP6rpu^Z5T{A%>Y4P=#l{7%m%O{ z#KWr)THCWk`~-b`A^5Xb1BuA-*X?4>*hSB2?gg%OY%eOp?fkxWx|01A@Na;Q8L09J zFj)O9h|}^C;BaVF2(W2!`0^yKO&<6*)e15#kPbqm$1hDPo72jEyz|vy1{~s+Kz&^ z4SeYwWub(3@_<0J!&%c}wDNi?wqA zuGhpPU`nX%-M{LlPIw1^=A{1gB*c8m-$P2}P}=O?$FTvN*{?6aKqbK@ACasc4r$>f_g4S0858v)oKQr`i7 zJdN9T%Sw&x7ihMgibBI_^!~l=8=VCwdj;Z{4X2$ZhX%csU}mk#r>atQ$pwAdOzN~ zKro%o2TB^E%cBg^AAKVCt=PMUKshRYLdYrgOI4z0w7J{29n#a;;$52_z9tshuue@etz=4l6Dz4k*{9?u8M{#_ys z?4VOhC0-!u4}-}}aD_U2j4_zQj>XlfCH!>9sNwG+{`?hNXN&gewET)g8QA6!2>a}= zNQE5J$zXi5=q#{#@C)lX3?w$gu;B4mV<ycmoKmFB&Mvejb_i z9>N=i`!M}9V;I5k)d8xDk+Sy8_63+_iA^;0T8v;!u*U>RQXJE=E`1A0vX`UJ8&Si8 zKa3yP7L(Z}WsXx*3iBp}W=QC*$2bH7ikrlaxbdHeY$eFo{~F;_(?@TSe;1%Dv25gLpiNTn-_{QM z_4m6#ya)SJ=Ue~g#*_i7uOP&JR^i9r)U%H(;)FUQ(#4l(7s2r}Uzr?Kh@V={5&?>* z+9tkbD-VsS)uygn?w%1+h<~6Npy7uw=};`?56_zE?b;AKLFh)#2$5q*PXNRf5( zPJ61OY52#bJaRUjG(GpecN$5zASw04u<0Q5n*j;sg8=1sM-7MysLB3khu3< zXfCjwpvURT2gqXL6Q?6c8Ud11J%c|sVz7d;DTaO%o7{G*^o<(T@{w(sy93o zpx+bJE&3}YYD`7Tck2(?8x=4PYudl-e!Qo!qAUOG>|yqttnt>R9cFxY6k{y3o2=Zp z&lEq^r5AmvEK(>YSCy_A`5(IBsBv-*Xh|Nlvo3pd^$#7dp`rd(wtytv5O@_bR7GO< zL{N&LZ)#+!9t~V4gp9FN7eCuSNV%>d>DOanvLj5b~n%jjdkDPA!EX-0#Piw1n#mLS#Xl!f;WbRtPzH@{kRwZSbH|!D_KG%5oDJmhZa1>a?C}&FmB; zBG({Upl-7TyrEC<0drLi)}Adm3AJ;iM5N zGYB?_a!Of|yGu$^tAZ>|Wy+brz62pUma_8iNc0wjzxGcJn#wk7Z^~Crj5Sigo*)ok z0-&2V*p zKAHqhAbmV48NhlJxt#;^Q7$#iLnFMtW2n$S%`4IX zQ=~F6Nr1xSTa9#`7)%W9xC&c|@te+ibLy#}z>}l`zRFi<6sC--Tdeap1{BMjvJBTN zx@Dbd0T3Tc4@6fs!mvP-;94=dju8}dw`*>$bIvWcuHMfF16Sw5QRqeQ#7fzwI2^>& z0_W`lFlxnbdZ)P?S7VC}n=wM^BIgZCp{jqskY#C>bjt7gT9KVKFwJByceZUbe2aWN z{+JKWnRwd3akA^CnsH1c;~d(UM{p#(+jynL0n8&81J`ZF0D^p2@J(+3$sc&+hvqte zz46T7AiCv%^0gD*B_yrw997 zZlS$ArJ1r!Fr-|gZ;mhs&EI-Q|1JBFt`*&$sJyDUsWVyDY7AJniu?wWZKD!=w!HI$ zYG1dcneb%DSP~vb5?)1ji08A&CUpR|5rna`VH^y=u%5q|&U}=93irH! 
zktO3qQ}Q=F8L&-t1UvOg;ALYXf#{X;YX{XoWoQ#e@H{QNtyaC!6zrS8%R)^^GhH{1 z(S`P%hRcoBn3bEz9kWrH*+6WK5E#9FD1_+0m^MrFSnI>dMqjIyP>Zu_Op580z#e|p zR<36Y=3oiK_`(ox>o3jrIHq|Awn%Gw)wk~R?FH@CL(Q-{p`4_7YW$D11Ckq%Rl9S= zNKmFaPp)+e?w>?ElDa`<@C#9buPo-yS=)a2dYA8*525Sb|JpAt<+5y-ahOD&@s zAqP8?XU>@mb^CyTB_qxN-cd{MxPh3oGLu|Ox9dyI&)k3=jCVy5tK>eN5-+MYhW!y~ zgkzxn$NsvFkuYLP!2Z>b+SG6KiHbt`yP<7UeHyyA6NqP$gcO(cY`PD;K zxicsD5m?-BGeT8G34REYdCM^8XTU$W*g@!uW~orYeHhMq{%nrRf@>REZoaqRRZ%#k z!ejd)n_)2Vk5Sv0c;N|_5jMDTb%(q zFSGBPf>9;cO(zn_YdrQnAfvH3XDYqZkmvalXDWn@I$=w~+`pAT3t96Os96!V#Md8I zJa;u$^8S3Nou$&Sk?~k4{MHzZ;@mVzz)U+wwJ}KdGlIq-@+%k$;eTspdN1&_PD#3u z%Q;RfY`)d%2q{w?N+|>JN`SHrf&G=tWA{CqA-c@WC4+>_$3x#Eu}E6Y$|;!^C#MFe zygI<<(D-SyN<$6b?51Kaf9;84xQcDUQj!$X^QGwPb(VhHsCr?(Bcr7Ku~HP2>eb|P zC)@k**+#jw6!6@sSl!*te~XW03-6KBj)gqmH^7c{aCD}dUk4A~Dw0gY#Z9ZV?&Xz>qZbE|G{8#}Wb^HOIwa$_~PhO~VlV z_eHc9rZ43C7X3a0=5JP+9Z_dLnVk2}qhoRl?Ok-Y6X6W^61Iy;f%;z)iEQ-4c)*xG zT4OCZdKVq>EdR@+_06 zV$+uP^B2eyX&=;Yq?**VR5`QeQ#?zlMxy~j(#mvlbyyYcRz8lTX#wJU&qO_O+YK#DbSBYQ$5b+f-+=_WV+$DxSYx&7(}NIr~1L#BVD?IUz#^okuxvcA#m6VZCM8= zH%*#*#AqF5wZuR^lvFCPmL-2EFtL)Ezk@VAY*)?f;6-*@yCUTNq_nMvr zRLuqu#J#V701$IqPXzx{`T&`rTjAd=1p&0--(lIVJ0#z73=Sb*t!!(vc!vXAy@>B0 zlBT4-^M^yyT1bW@NPk3dfoV^owqR|w1~7JA=o2z(85%dJfh*|bs&+c;OVl_^gw7s~ zvd*2P`mmVOIynS$;H(WecQ*i9m{L<+0ZsP)*xtSLo0l!1ag~3zavk&Rj z(H({flH;c}7c7=@pggu{t7Dy6i_GaXb-V)>V$Nf-Qd2U%BYWv$Mdz^c$I!#WS0&c{ zw{X?G-!Da%uev{Uv+Nh;Jb;}5?;jQ!0U@vTeTO0!xDDajFPw|bHdXc^QD5DXLw$sb`^C1rx_4nbulpHp@EkN<4D9l_+N8)_#|(vo3Ou-h@iANNf}i%1+0=$|)Sp zwU(9Rv?56~ibNm@)L^cf*>Lhk_9XXWd&W|%+*bBA74E;3dfie^2qKI2BwFWfrcfFC zG2j4D^B`2q*+f;s?+-&B@tD=lU<-QAH&ZN1F~j?ZgRtSd+-E7BzS6VBmQqK0sZaJt z<5ugUPZ75kie0~}ixP@2mPLQ3Jtl{9S(P$|)o%ERk6dkFgCj-ApB4dsLW*k`9lUiE0CD4Ou<32bj-yp+*jN&dO1*K zJ@2}NJr(?Z#sa|+He?eMZT0D+hyL*v_$ro*ba`A$p}08p3N9(!Qp=>0W9n1&i8Kk< zm$wZ|FrLloF4oYiz3RsDY>Tvi1kZyj{R^%2%<`8=Md!vS>8dj;1&ru6sX z*9Im7pd$gGR}p|;R|FCrHEpSGH$s4S!1AVME9ub4?!l~^tX#WpVfrL!9XF`y!2`onx_P3EyMPMgDy&MAO{D!XHG*0n@=i 
z+%&cUa0N?yB*VgaE0Q42sryKLiA(W>RzJskwl$2F*p+lyouCA4>W`REeEaHz zYuzTV*!0?)eOikX6o`KE!;?rBU4G3%LWXdBJD@5Ge;UjPwFL#*5$#3=1xhP^p*4#6 zg1Qco0>mjBT{NPuq8+uF-Ho^Da&~JaY)9^|w%V{pvZ+@fSnaKtOlGvIkjHq}oI9DO z1xWs&;C9Kg;94pzRN9WRuF`jt##=p$+(Zg92QADktZ0HN_EfS~0aV4f@U)yogQL<6wV_%>-_iNz=aV=;reBl!C zu8pdswJC+Mqw#tThG}=4+TuDQ@MCT3;v1{IP-bGsEm@WpOFA)&Ji#7j8^ zF`C?&`?bAgV=vhZj&zr0>+c9o)zyZPk!>oZN~pZrF?=T{Emc+-5^NGAwQD)%p$3C_vdjrAF|c_vHXcEk59#XC zzfE+nfn|9`lXU=qV(TL8trApCc0<$`9iLAi_ zis#)EEuvpFY6LBxHJPw@uf-hyb#c^%I-!;STKaw72JyWQRLK?;#JE}|8xDTn#~qB| zcm{X*{qs&YOxk#wGBuQFsJKmzaiZ*L+*R>1?Y(3gy98n$tANs7+7wc{%WZO2uty12 z;wZ>&WQr0V7N^Y7g-A!3jJn$}MpEJk@gvr|0LjexfauvgU`=E;d)x8Nl=YPKo96bH zTlexkN^zp5#yFy)b^D$taJc_!Rvl`&%|v!22~mASdtmNtKN(VA|01<{UODmf4(8(HCImhktpJqb=3I5)%KsnV)Cs0t9eb z|1^0yrd|UW;2r2j0^Jf}Mz;I`wDrCjH2&$We*^se__-c@YkU9{yu&^Z$CGZCfIqLT z{2+!-{;kYFx;UWqTuWifK&7%OqriX3Imq!`sMBqR{RIU66itQm=FsGcnp)hSid*v% z$;E?xcc&N)Aj@vWJvtDXA;X7OM;oAab(v)AM@O@gKNkHqbyq`!WQy?p=AOUvZ~2}G zXn7tyAIRg>K&a84=n4FPb65Ar?NJLXzni6W)|Q+CAr;gkUf5h z&!&U|Yu!jLI;TK*SJSoZUr9h%sM99;j4&437DC7Ok=ektQDs(qhq`FWCUHyugOm*0 z%cM=rr{diFASJ+aZoud+LxE+9lJ}fw9cTTl$@|44%+dywPSs4#;rVaRVLa&g(NTl~ z-;WPxuUi+K7G2}e2V@1VBr5j5o4X-tg|9E=o=~0zQCgcuK6b)G`~_`(>eAv7bU%I) zVnJVwqwNL5jM1)4SHB9OM?u6_{Gb+G%BOuWA-9yVDAOIX# zpU#HxJg5g8GByQSFFG;H4!;XXh)aea*_LvZ4JwQAr<@KszFa({^i$cP=Q!W8FYXd z*Wi@u)=k9O{{KNqvR%hg`$;~8g8nTD|Lj@(-!0m`x*>Z22IMO*!bw$Z5>W>RomS>3 zO^-RG(~;UO*Eg-?l`a2|BdPxXb0k-Kz_W|vHVZBxb?MzF8L&JibGQIxA(HxN_m8cU z*&j&*E?GM}#usJnL<;S;N9NrY9IWvo161^=8w2th6chWmwz@{jVCnrc0c#=0ibAul zBw=?uy+52=z(%!fMR4?#i-hWj|I1H4CWmd}<$`o*BnDy#8#eswaTw~&7n@}-3hyaF z<5pF|Z=#@U{s{NgsAfDUkgv)FKD)`VLX7<+^y?&xI%31D^zRRF=8taQvpta^7{GAc zCpjfy?H+u_-VlVkT#ixyK}xd(j+#D)uV~gAL&}n#fTBx5c9<11EP*|&p^j7SJiBHJ za^)hH-B9X+u8(bx-BhfZLO464@Mk@gR?F*x^%mxhbj1ZA_`LnqVTWgh#^0+CK-VHS z7@pB-Jk&JzT~S>^U@fY(WV+Gj9^iWklydB~ zWD)dQml%V7Uuh9YUjw*3=gI**-R`QR0$QTVh`wnXMy8kRC{S$oq?qKTFWiG@gm>i= 
zR>L8@$Td2c{>dW0+c=-hT2T#ax+6%|=r#S0cWE}y}8A^cc#S#??2a)a6`;e8vDU>ex?>LA8g;VHG}KR>@x=(x96_11Rv*=n2iC(Xs39 zCmi7y{Kxgw_zqV4b)YD1gUGWUgtul@U(ss}?(Dg%Z3XI*_aVu6#ZotY3~eDm@0(HW zIbz8Z;d6ZF2lW?Vx4doV6j0>}^9Znv)hzgkhFFrsW3I|H3>|@5Ln7ThhHSNm5E9$_ zA-@YI0-6W3hkNUHTLmJfR~|4$5R3`uO}XvQRO0+Ec!k#x!z8e{lWM@!jL-Onbi0Hw zNv|Oj;uUp$-Vbs?w{)kV?5;rgf=D0J7d0kv-Wbbjq7mz!h{eDL;~3=ifV^6VICja} zGuhV<;e5IbL3#uGo3S`MWxN`Jn(7n`_GLTX+&Yuddp_n(6=f1>s=Q`tj`3fZgi%Gx zzIa6()KYR73VMe+hgS2e&^b7@X4y8cul`LmiabRz>xJe`WRz6*n?aDKHk=4q#Nd3I zIjLZWTp~cggRoLjZ(J45B#{V~3D1le&N6Q`I`+ooS-vwLqA{bWKt4@-)o04eXqCyl z?^wKR`s`PmZBhgMMblw+9eeM`VtG{m_Ib}+38OK+mB*Y@UVZe|u5Y~;G{fLg;XIK@ zT0_hGtc->?SObgiIK23~i|lV+)0%0UyF$U?9WTUb@vX9cqe^CXNoH*RX}TA^`S!kT zy*+RR^5$cJ4g`) zQAV12+?_0Zok!tV{-oOnV8X(IR$qrV@6iwCI-4jw%z{tc3tkZtQhWg*w+64B4gdzj zs)qnZ-GgJjd~VE)2R)&`?ijnY(kc{Ji&LtgPr-%#tkGbr*K`Fjx>>B@vAiaI0+Npy z-pbZwey2(jFz5)R-uXF7eMvm4tm@g2jv}UbzW+jZ;%Lmp46zoOaZeZqy?eGvc#t%% zBC6td7zF%2-JPEAK35&;c0Qkv5B{ut5kmf2MF>e(Vq~xEd1QB4`-Pa(>g`o@6WBW= z6o+>8ba33(rb*cQ^))?8b1JprtBvc(9V%Xb$r1r}`Y1?$K>Aubw{mqzqQD<&-m?JA zwB>s6c+zXnf5@iOptRV;MUyoQ3R(zH1}*`NG>rBJSMFV`EB%VMUzkj1drmhA6fo!3 z8x>v#u)=rasqqoA{L7oF%-fIu7)mBHFr6*aQ$_?zc)`l z1z3L;=pPg?8w5KMB)nSA>Q*8x7bVnS!!4u#X3Ikbv;XL8P%C)yQ7N z)H(Qmb{3e0^fS>`28oZUnb#1ZSNzbh;cLys=ce-A**c=550kSuz5E`IgM8n*JNM|I zH$zg!J~;g_z}wB$H=z~~@L0Jz6i~BYfdL?2Y5)CdP12)*bLV!H_|(OodlXI4YI9)R z(9J>Nty7N>)?u(`}rmJP-Qtfp*lLs;8%b0;TiOz(+LB zKhoXO9Iq-_S z%imBs7*RG+JjUD+(8|Jj8D-^B8RE*N$J)ZvVSD>fDQ*|Er*c}Px)V}1cCeCJ0^#T- zRd@2_ASaj-{yl0V^xNjVN9P|vl^A{37BJgC@6UJu@N#opdJWLHQ@aPSX@28C&XIBj zu57$5ZeD)G{S{2Txu!=$W^965@x(3FoCI&FR9bd^Actis2h{!>oT37}%Wj8iK&c)- z%VOk`U(=xh@gGoaLoj>cRzGS4Cm?_%VerOmBzG`m#EJ8gHwA zgK34q?&j-b_Vs&r0mbF!A|!xg^VvPm{+Tmr48X{JG6WQ;aM&-5-R4|1-aN zUC==!e*9b8b0y>}UULtGt9BLK@ozXoVBw{fSwzs&yrFbqH5-vQS=*^ICR}gfVkyn3 z0fom3PaZ$|Ot&en4*pr}YTzS*+>E+#0DbXGXtDk}@z{hEBm9_2!uFuodR2Ze!h)`3 zo{tmH1j>PrS#x^OA#>yn@K;x=Mn1y5uB?N@-sP(Jo-F7+)l!>tSl#A47v&W{aai5d zRXfYV&tt6O7f1gZn$xb+#9l^ws~nz?)j-e+%&DCZbKeB6{={d4`$wkb#OC 
zofqPLSGb@&MFobHv5mweBZnn__llglTozo}pNtZE6m@u8Vx}m%DO;4~-XOd}yTVe~ z?eYLKf|)YXRM*{SOR!G#OX6%N(-Euy$+KFSLT4YT%vmS4(uHerF`a%-T1}FYhBfzE z{(JvLBKJ)zNV}91@Ost8c|y43?+LitzaIp=2=2@X{tW;e^O1-;M!aQ&`r>}Xi0&+5 zG8m77(AD7;;YQ7k4xG%J??0Rd-FB3|6!xZD%zx~xkPjsO<-J5V0Am~nv;ofR(>noQ z=cl*%9f0mAituAK$#)HkN=jj|!08ENrSBqW^um>7};@q4c$%#@(vuv#!C1G-W@!>H)J*Y=<~*R{hC*3VU)`GGLJ|F_Y10x@;OP+ z9_|i*o763&oDb~*eOxb6b{1;W;iErFAx`GK zY5CxMawA?7#0fqyx3~V>E*$U8J~7%Z@TKO4eFfk-FXa*`G^nb4w_Sb$PG4Wu{{cMK zwj4eKsyxl#0hjFd3Lm&-JI)G%!tyt0%r!^I0l}NNfGvrSo6C|J@0)7xAdcnlaO?mR zi$R(?>+qmiJbeB%s+S1K_)^S(jO zu|FV?;c}0`9Y`S}3J6GnpoqWYMJ7$5G7|R!L-l54@Ed?wUSsfP0!69B5nf6Im7pU} zmbJQ-6buH5N|R-%Fw~i<!k<^in`Sg-Z7hNuxrWQ3#4fk=1j%abeaFf7$pw7COgXJ31E9Q>RY;ThJ4_?0poK!`_JcG){PSfp|f`6&ey`2&F#8DUx0mrhp_pcZ>PeeyJo!Y+#MM(HX6}K8VV&wb1hg9GLFryeSdntSiSi0xz#hGp#UQ4?d|E^nQVq$ioVyZ&w0~^vX1l< zE0W*^ViihUgX%y^n5RW%bbXGXzx~u8V!6&bfHpQneC#v~{z@R#DEh-oR7+ZaOkQu^ z(FX3#1lou^SCgcFvF3({)J7<;%abI!vWjYM_I;iqt_WCse`lwyGcDGr<9$;tpC0{n zi-M=!flk|%c|-IxEJwICnJw9W1Kle&ms6YJJ(>^%EF#b+5ZFiQuX}l4D<2*_}yE<0% z#$59o-elss(Cf7e8x1$cl3IDeCS1EFrXeI5c2ErwdQ4Eul5R55zCyn# zdogun{XPjn35tx8#7fDL?i-nzlSU15a?vi|3ivw`gf7(v372sr2pK6IkV3d60W98 zsh;Q;Vuc}a${tL`fnc>~2J>fB@c7(aeZ(FXd#PeOPNbL= z)ozA0)6AwG%I@jmdb)O7@7 zjUqXgH&hjQNru{7%(*fg%%^K?&plKed?dvXrUcv_AId!~frO6{cwV@dk8papFaN41 zwlN`V!7VBbh>QdpbGTjwurLB2778NlnUNhwoC&z8H* zOllf&(mqO*59m;VaAXX|86+>j#Oufxdf?PJq0r{E{gNIGSS?uA8gv%J)*jw9=qxl{ z`vh4UJ)``Wo7vw01ZD$2GzpU9LLxlUm$?sxAYIp8_z|*V_G?>YCJ$(~5{179>sYsq zLgeo4pR89~?W7;49H`|5kY}o3>kHoQ&*dJUq2oL%%-g8;jZht_Rv+SWF%@&Uu=^}R z;fVQvg`}>1;=)?kCI%FNp{w2PNk=iMts)Gq^9i z0r7XvrE=xw(+&T|9%ub(pESca&9`%_8OsQZ3rXnWyE8BM08n300*FEk4wdIjlxhV+ z8?Dgv3ZXmDhpGd9RsurcE(H2$i*({si4eAx6PX2%iy%@j(|6W*LV25$kdNwf37jpn zTdsF+^jQ#jSOsFhbG*A4UN;8gLfE|of~}zx(D5DS3OsT`H-5L;1^0&`Vth800l|l4 zsv%zukV+JQZh?qJjJC#Jdd?Esq0hCQKF}HbPGQw-=#sDR6>j>}ou}JNkH|F}z~ubd zhCjwp1YD1?RO6C!?esG~;D@bJGA(O2a(EJ@pA6ArCZaL}HdP&&m8YVBvd>txpck`` z$NRH7j>nU{U0wtM+SlKL;xVC1+O^*scl~$D=idOQm--GsZ&KOkcR=%qJW3!1v`~&T 
z5CgnX%^m%BJ6YkMMxs+||}T38u#SiKMf)y0=z=3c4mzTPyZ#Gggz(0~+1D z;HPeJ9N~1SaCLbs7BKga#0u>DD32WpW3?e-%YCq_74(eJ$-52=(6amd4-}zy1|qXn za({2^PFyp0a10^Tlp|fi<~W%UDCb525}1)ZV<(QL-4fwIE@}A0g^lGUWVt^LTAm@v zb;6l-=oBJP82?0J$3-h)po3N~;D)Am3PU@q~(YwPQYBWz8s2(j%jMG5k6 z(HRFkh!1PBH`~wi$dH&pRKS*uHmCJd5#;V- zth7%wlAw+UJKQn|rm#<7o&~n%K@|*VH`;@1C551E@DjM$gB8?HabdZ1|D%VlFtoR! z+)E0cO|l@LN7sa51Cd4K_+;nxajXd@jq7(k?7-^A$Cf|?k9no>0xYtXxI`wG1N@)P znSKE-PRo7fvH^Ab)PjIjn;%ohSq5;+{WhR({C7(Ol;xFm8Z;eGFel5;>b@(IK!qxD zpEm?cd5U=nU30Ihq}>bPk|5fGb>xer978|X)2N8U*_jw=v=hP1+fbqC;mlc7B2ZB~ z0k-p25e*w<8k@gf98obU13ZD;an4=2BBEq{tJ0)?09C)RnvQ`FL83vwL%=D3%n#SPmh*9_N?RaQs$t^QOICf0e78N9?5GG10O0*f=a zd+rPh1iShioxUG?yq@f~Z=~K*gwCn)1GlRQZZMtWI29LS?c?{Gv!lW zliBJDgCMu;X4qb@d5J=0pD!tYEjt z6OJV<*N{vou5)|dE5^!qI`)N}$zIBueLT{;pnDHPwRy8bqFe5ktklEH-3j zE-?^?myi&xJ*KH@C10rhv@GR!apcdZp*Er?kxENScC1*@B0Vqpc{OWVpKQR|y|EF? zf;v$ThIj>}F5;)pq{jXPhj*T$zQ#1VHAvXGMQPhvO&fbd=-x?#tuA^LuW%%YN2oz` z|HPbtqUO(mqs)fFbGtsg!A<|bO4xVm=IDOMv17wYiY*bRs;G5=1|AwEkh;7H?4T}= zjs*s#au_@p&&R^5=Y3Y#eT$*YB~l9)nqG}DW_z|JEw6`W0t(SSwm)l3?=S&9qntu_ zBfFkR<8&B(mN3^7meoU{c}X-ZSE>J>#DFKDT#!w)x_W4j!2GE+MDx&=f`=gP@TqyJ z6jw3lBShnBf;U|(JQZk}Rorz&V7Y;Ai8=)7AnC^()?--f471eS2C6?{S=k#zn0uKT zg9c=L!v}l&V>yTo5iModKa)Y-w_uMa<`ezoXW*N5q#ax5^`n}TMCzkbkZngN)O#6#wFktsb zdPFHidgTlNl@ad7x0cSe9OO0jD)a7H8sKh&J=ogSLBB|LG(I0GV4nLpGS*k8RslMw zSS&3*76JMJUpml>siS2p`v8x~x`~MSXRvq26dow5-vGYLXMOS?!M%IisO|2@22@-E zW^0{Xkmm5f)cBwx_aeJ*PotY3%E179g|!|!ftFVd;5@uG0c;6JvBxpHUEHK&3_X5Y z^?2(RDaTW=F1pH@1O$tvQBGybZ8)%#rC&);uW}GKq^0XrA(sv8v&Y&IWf)%uqX6_@ zpnqxrH$MSix6CNO)o1#vE|o9!DWH2Kc43F=Yscg<9@@$N@%=`;Zk1Mm_5Ot4Y2Igm z5!tmYy4aHG#A;}oR|eLPxUQ6CFj(D}mcduaWSA2;Sdpv6+_(%C8OT^2U4mo$_utm0 z6z(BS9V;i2`0{Zp*}@-mcbnhN35TS_%0bBtKB!;Om_Mh1>DHPQV+t*^+`eSU!Zf%K zzkrhL;S}w%%K|p$1($YlUU3OV|v(}ym9BzcYTdme?&wlg)y-AMWWn}VK(!q zaqME?aXt6s0(us)pms}2rx*I#y6ko&RjGsXQn?G2SbIFeU2-D->}hJEYkR%Lb!EQ# zBKOsvh;Tj70cl6lJ{ljVt~OW_t_DFe^+uhMfML!{xncXu7n4;pLu4~>L5 
z(Ubs`1n~zfiiU*k1(pZ&v{HX^pRwF>nzTA<@}<0LZ`V4QxQcA~sQLhqk3XV81pAl- zINb>9b7y+evtVD?iPU7z4xW+O6$iw(#~N_8-MC_Joy4zqk^&GxN$+c-eaj&*S@gxi z_~5XS24S@qTXZ0Zv2Y1*(o>c4f(*x#(1}M&rgxTyMkC2$v{p-@$axu6eIRi?*4v!7 z8MnL1wy$@Z52IF98*g``EBvo#XpgzJff5YUC@)c>6N2Eq!g0P9JeYg`sz$^(jS`3z z6rX5U?D?&d+i_gst+<1-Ds)s)AF1@^RE+G!KH>$hN03F4%~0AHZ3)C!Lo*|F!r4lsVaHMeyo*4Ld^xS{-|4Gb~L)E2&qV=#Krq}m>NM8 zJy6pHA5!#`Ti(zV<4LXDdN+z_;4?phU%hp$r{gtt&swOBG<*d=x{A+&M+jS?+W!Lh zKnK5I`*NhF`q%_gQ}MTRk{ZiSe;HEK(ZZW2)f!TJ5~Q{XQZ2KAULqgeL2u4GkaLoi zu*8Ql4UaNGeRL;1t~qb79QM-;h;b{=?-r3zt1_Z$sos!usdmC@zK3Y)HV zZC4J^S3pUlCY{Xa$y*s2A^E73nUJ^Lv?G9|Eneb(?k|5l>e=J@aia=|LvfO z|6IvaIsP+R<0b;Mi2$u90@TFj(DM6XQPU@NFD&AjCT#S(2^)>p+zuRFjJPujVbpML zwJ>J2M^p|Y)I9PnuNdFW?XEiXzmoD#W%}RIK`&GP>ELL;(f?NQRMr0)t*_DgzQcPz zjpFvbC~mg8mQlPkfxg3R0=Y9+U?N38@Ka&vmWzi&L}fi0G=VJAUbc2*637j@RH07u z7B~UXt9N$sHgfB$bE)h-i_c^*BRiytkW1dgh-&N3Uu9b-0Tgg*k|K+UlDlEXnu#FM8nMR zFNfY6({Dpfznrzs8h-_MYws{0laPaLCkT@_OH;p3GFMN5x>Mx@VRSLY%4eGZDp5Hy zd()L7hfwzf7!H^DAHB(+ zRbdw0ha7vD$>lr|1NRESw?UMzdjz_@+4!@oZWiKeb{Dg!AYteSh|11nP}3~*XXY?L z6GEr`cW82!S6Z{myi)yq6zf!_kJ%H`9j2BiHM^KnZYPXG-Va6-_*wF5?$|bQc-^&dcD?r6gF%V#$G4v{ny3 zspLp`vwA2W$I|wH$=UD!Dz*O)y4}Nc{NLeWQ~z}}PZj&WvHTm$|NF4~8=L=ou=xv* zdfyWl{enHDPK2+-y8VJZ5BsA}LFKcwxC21^>|G+IE5wIeiOgh0CulNiFIibFBKpq$ zu$4Ylwu`H<w%zaw!;;WE67(VbIAQep%pS1_Fv)>~MYWC-i|(pU6;mD)cFRZOKPp zIM_c-mf|e8;D^y@eF<@~*}6Mqd>RNPt91|dGH)~d?-_P;;$>5I?XVj`1AW(mOi#M+ zVIM)dy#%0t9FPCp1^b6DkGn6AyF1`-plHnWAY#iB3tl=KD@|ssZK^Z8@zXiN`@Y!_ z3}KvqsU}{0QmoiJS`jO*Ehnx|5b+FFJo6@m@ed*YFqB-m0n9j?>%*?!MSS%u|+qTiY)@t0yKl?+9&;mRm|9o(> zraFj?Rnu@_6%Jg^tSMl_C&GpME8@bnWx^-14_eBe{Bjfca88RR=goEHsm7RT=zDH! 
z<_XbvW5`q_@b@Zg;!=fzyrFa4bjn_4%F-$JIif@8Z zbZ!t&G|#z1^ps_&#`8xVnDYc_%)}Hu5Rs+2FQc+3!l??GMU6L~&VI$C@%wNxK=hpW zKBv&RdE;}65&OiDy9miutZwC);Q&(O$!p3XOO%I;DMEsnd{49=AudV6@d!i0fRiMK zCpfj)>h&G=qu2XpSiNiJ{nnQ~sWpFf^n_RWBl^OHl5$Qw4>_FBD~G}W$y>UW*I1Sv zdZRabz=bg^W1Jk*sgcWWE#9MvULS^sFjrX=I);-0AJNJllb9IC6 zUL%eiKAR&ah%q=qCqTZHt2pOu1S~qoo%#+KOs!I%2i-2~fHyu1DFW|5UWmpH@rQU6 zijBY_lp=?FQ!$i`c^i{lhl_%T-Jm<*>UmVxg*l#}FMTo`GMT?sWxkeDxX2!J5yiLL zg)i$YuX(yD01-b_h2(Fby(}nyD^`=cPv{zSKJmUM{A%huV@iC?N{YHIO29thl6s>- zW)N*9Qq6W)M6MI;_leBQjwi@oXz{u(vZhu!VGP++V!bqhU!*LH>HHM*jyH!QF3;Y+ zevL*Hx^w6j;w1nnh0`3(Hl?h-s7^H42d)N1me51M4W$Y}d<0CzV;8dK4zVMXG;jij_zS>{g$TfqlL?fztT)XeIXS;GIX}!In6kUx#FbZI zn9Q$$1ueIuVzOXcPGLtYI;N>C>W5BS(Bb$NweA;6ZoS5axTGh{Zhlb>I`6F z#FNC(CohUH5a;A}yimyKU5UXRnZ?+ebtd_pzd&ZYmqN|WAX~0s-$O$#$J$8`GHYJN zI1#T_&XUK%$&y*x>{CpTmjqCE_j5QLBFbj<^w+|d&!IC$?qcu0Lz7=9nmL8ym#;iH zsFGeTk-E^Ps`Tod%to#xuK4Kkb%I>QYyXas}=PgJIWG$mv?Nd@H^{fy5uldW5aBy4U@Ch zh0g4Y44eIi*}TPIRC91Q7Gg9&bV%q#2v5-!P$0;;U>^v9Pl@MQE^6a_j(@UZ-ZQNt zB8wVD-_%sxC`O;bo`+ds>6fCpd$Z=A3UxNNQruuSn$!+Z`Ub4N7^5GL(O1DO zwkh@XYf@a^-?!qf)z7iKyT5PU|IzEp;xBukE}&|FM#Xa<&Km}`Hs=Q=9QQlq&bTI) zyAyqF0?)~b%*%AMXI~T7$wAhR?J`g`d{@SIS5x0nF?yBR?mWv%Aj?Z2kC|qi#K!C- z;}e<8Yk(Z$JA%Fo3jvbBXXJ49U@XX{zB9D(V{zbbka1hE7;v#fA4t5W_$?WCokzlA zJQe>KG8F)c+}7@b6nvzwer-l5c9wWA4SmeHam;`3D_UBAWQ$gP>t{$RvV!&9yj)RN zA-^n%M_g8Er?D|n`?eB+qAo`*DZr1E{NAerp1L-bqj*p0Z7N&7CocMpkee`AMxnC_ z%_?SJk#F8VxN_zb7kP4)bMs9+otz86d2}dU{m^wOVyv$Qt&KxAofi2eg6_034O*mT za5=vQgPH$AxlyM@dd!Snf+9vj>Y#_yYJ9?}g%yit^QP|;J(sjDlREc}=duH~C-6&& zsw2BxigHg+K{s*paDx={OY~V@IlVN*uv!YHdib2Y3EkYp2~{XN8*OPweU-#-rbbhk zwJ*1spJF@=rg?~+Q@fu)AC8dQkA^lkoy;(`s`^if8=lrl^-|}*wZwlO92~vsrtklB zk6tzBzpHpk&wra^-{#o2hGXC8KsNrWjsxHL=kMppw>jzE_>;DpZUiFj{qF13%L|uV@iM{|C*2fu`2@_Ai288s^VF7C> zSOiffBev*E7`|3w)9$1So7iUDnzD%<9i7UJZSjnaCDxZ{>U+o^@$pg&vkQ*9;3YWd z#*xZD*|GjbW^f)j4h*1!`E)rBjw7Zw?VMKnkgt&MVt*u=k$ogrWP;@1%AK&#!B?CG zvzM1c7}>6CriNF*F=f-##{8TBdoG#Yp!K79DKFQe1yDNmXPg?Y7tJg+Cr$taLdOsU 
zngFO$%D1%RK1u2Mp8BQee6RB04Eem)p?9piD7|i+9PB61`yQC@%%9riQ9tWVu#YF< zWVI{^{b`cwXLDG@UChqAIE%T`{TFf71pB7o__odZIaSBMdwx5Zm*1X(&U#LKU%J9S zlEwD8I@MC?NizWD(J`B?tj^WNrdn=E{;-*v^EG~xaRHtq=f5ih^3A5ryq=GbBh>ECZA-wBe91JJmTEw9nZz= zCdn<>7i7;8^v0Px&@KWMmnuM6RwN&cv*&AvPc?;llE$Pn+mYn-ROrl!K(2w-OlJ|X zRV1+(h@$Z4hRDpW{?;EMCQy6@C&4@=b?V;Du>k}y1xck1yzs63xGhN@V+7s4Zr+DM zfP8m(3Q1?)-k8nqbr^U;M?fF(1ThXL!Q;?~ONmTkmRB$2a1@zqalDFN;XPGz54dAf@3Ce6W=Dm|lqmX)0uQLfY zx<(|yw8mA#0HzSRkb_r4s=F}ipV{pn%JY=WiSu|cfmEKG5=i7ZEpi_Wmje%}93w2C+d^69o(mB)s|8rskFbvv;jDh-&B!ak<&@pwRu6 zV#YBZjS=IP+J}9UfUG8tYy;wNeiG^OXx5Y_(iBQX>oTwV+)kf^(XMlSHlyht^Zi+zn-RrS%l+Qsjeh(_ZKo@_7W*dbw(l@w*dK`= z-o%K094eb&#^xxtEr>&1VzEeMUQ(N=`DaMYRIOtkxyv7_A~7nx67%SGzMAD@5#+kd zI(0$iH$1`oSIi&joR5+qn4==M1X);a4h$KR8@{s{oT{@`rQq!djFFldW#)Y z@y2KJDXaf+c=RfD|Fd`8J815IuHs3tgmt$U`EVd~xzs(l=*HWXqKN#fZe^Fnth;UC z{=S{K`SiG#*V?IDSPXeX!ySn``xdG+bG%cnI^q@mw=`viHCpGbxa`O?@5U83uX;l+ zr-ym($(3|ba$Bw(9-UhL?S+CS`dG4L$@k{M0Q$la^`ZCnLSw-Cq9O+DE|ypek3jcl z=t19!5s>_@3=7#F76#x-rh~YY1;(=4C#1;yvg4!RT(&L8KW|v2>2NKguV4g6tTpv5ud-;4BVnWk68xFCL`P4v| z__B<8bNWb3SdiEWXqB*lpu_w7mi_8ut82ACbL#&eC1GD-7dfz`>PP&P=>Pi%2bucM z`$vb3{=bUH${;es1}GG9S4NXrp|PMwZkr1i)5kX%G7vq4s-Hx7MTI~2r3F;YWIszN z|HU1ZCd^Pmd)9_~m6Y67?11EpDJ!C)o|x<-ttOVM8dnQxYN;uw^tBRQD+gbTYFt-U zRMi!07@N9V*Z<79|3gP!mz~cIShD~3_Pd$-zrCYgv;VK+S$O}~1aYPbLPs`~?Ekv; zVroF);P2ziQ;)6+Bm~ie`TGpkm-oWu^;EbM;|Uz0i_r6~kV6sA9b)8>0rWcQrp4yx zV{^dw{md!#fh!Ee`Ofvv#q__bncvk@=Kt>wod_ zVp4%)g+74aFymX8qX|<19qQbF-mYaZXcg^J2$5V3Q_I3g%apFp$i3@ZdcYFOwu%LT2?FeVfyv0qrvx$U)4|%ldyV5WUm{ zVZ{K*IvmomToBQhHhNSSO2Y8CjEWvay+1oRFa|`t4rZtzxgMsY=l%67c#c=2C{=I?)vqUM#z>N2_P_EnR2FwTJI2@an~L+DdyH&+Z{ZXYUqn$Hrjv;H zpU>|aOrQzk4xU6|aeZ&##V6qnwx1BDK)2>l;#lfI?i)2{$0cToLdk$*tkGz)@rbX*xJVNS@Xhd6Nzs?}<6IgnUD7nFPi2cE#UeFrv^!y8&T5 z{y}`uzhrx9yU0TvwFBZ|XWBXe12iNQwdHRTyO{X9@>7(|Y^g)e=gC=0dYW7Bl82m8 zy)&uP{yi#>rZ(+p2#Q9SacZxhJ}}UgXybQop@&__u|LwXDp78XMeQabFKKm)=)mfD zgn0CK7`Tw5^jU{Z)By|Lq->+r)`v`&h6KG%@2IFWq-9s=H2{CSP~`ODPqqJf4CS^$ 
zUpVqbAI!9jb(eT~WOpt@PDnAI%APS2Z^Vd}sQu$b$dLZpylovGx0m9^4c^k{daNCL zfMuU+aiA*S!ydSQSa5CfYqz)PqgUb2?3$Rf@+_O1OBRXJqinUteRBED# zA7&haEP^SDi7++KJ;+$nnV(+aZ2sG%vzy1V$R zo%y%!4!d=Y2Dk8r$CWVr@5i3 z557)E933yF?`x?JO1I}KQB8@2~eMNpP)xXTO+ z6X|gzx0&2HGe9Vi2%LB(>ue$ZtTuI{(CT_l3>qi$Iq^CA!ee1WL;s9v-@$&@s-Gf% zcu)LGLih>bG(^dtJ~2e~E8m4*xPJRz{-syAqWyo6_W$l5A09XM|035~h<-h99U8;GG5j0DzcKs^o_g*7fCV|>V`caM4vy02|K0uO{@-ez^^E^2 ziK2ZN&w<)|_ilRw#G&yYToCt4%Nn(})smoU1#}*F9J$FBZnh5`bH_)(MOlu`FD`IXBThdmjt^qj(q-4IpVno@kIPO zCXByF{EpC@7@y+Hw-@KhFD~A`j=zXEe>uW4aDnIqb7sn~DO=r?iIr<1*C9ea1CW6` zW%EHc&so8JTyT*hn{_fLN|REGA^4deA2`v&1c{jLI>Cd&~*g>0w_6G zS(>mDUzfoC35YeT3H)*bj*pHGjL&{P&Yy3T_b8|oS z>VK(=fBGql|35rFO6z}zy`x6|Tg9`U`ro6)|3A#dzsCv`U27F_RTl?at)lCJO4}&C z;Ae$0Zr(Ohzx>Z$BP+ZA-R<^L^1t`$;HlsLdLmCF|1HHsu`%nG;cMThBJrkV$EGz$0B6e1mrr64QH-qTvmd1;OQ08nV1%g?$oamN% zme?0oK-+=sAtjTOWPzK#|6=kOuPt6|fkbs3xd)UMz_BK}ldi`jI&B+p9uJg6w9Yzg z7>1$jQm*mKH1v=ZgQ$~ROkfKv6^UyI$ShJKR__G79r;9u$)!V4Q!Q%=3QT9^GOknQ z>dd%L_1Rk?TGXl<{!hKudfAF&tkUjR)g-wJaW+sT6|-HQU%ol}_$Kkyr;FF-E62mg z=eqDvGV(C)W?AJd?xj)(IoD2~+a0M=L6-DSp=3{yW-<(ZZgK6vpF`jE7SXH|RJ>E8 zYOZEvAe*1Qqd67DH%6~qqFapaA|eHUv~GK?UC`2@J}s4Fj$s$Q84jfj2fQc#rn>wS zPn;t0;WWT<{qHcX|Gnxx_4vO=|0~x2{yCHA8V#_~02>Xk(Eu9_P-}pCk8X6pMhC3@ znOjY<_q&SPVTt~CczB$y|9!assyY8z$y1kd)Q+=_FC`o>Vl~C*bhlhi-Qao`Iw3761`3 zpDY#mkxL^EnM{bUpl>;_qZg->+ha=tbAF~TJ}Oc$tmSKZ27C;;AOm$B0S?G50wYZ? 
zZkne!QPY!OuAXls`fFwwM`!q`i!dzJL1BCpGhdGIiTDf#SnI6v{!qx!Ft>mfZOsEjDRd=ne z`9+=~(d92LM?EcmYgoJ0QVz9ZM_R%=)&|gv&1{zaW^agn=;43o{irV9s_$cPQTB^; zCwvTjB3({7U!)sr1w~IscTU{oXG#W{agCQRx^UA5FJ8XLA_@ip%i_hV5?Q4adG{yP zg~HDS24*7W63`-@1;kCg>S4y^A1c1CS~o`BOMFP9JSv=@98g_VvN9`2CCTQ6LS8^F zn9ZF`-Bhg`vw;=>TGpqpE^!MMi9+>LLtuYI5n~lc*l-u#QqMB`f7OCvW%1wp2dVro z`$xyG_M82G70-I?|HW6Xp1KYaI1rqiTJa&R()bUXb$38~1XN)wNe2!E z0gJbW*AbD`y(KJr_W$@>it(jV6nNNy>;&|-(uMmcLht=8kTqCTlM|@-uyeE?kzL73`VO=*(0N02%SKl zH%pI<+C%wk$pi~~Sq84(&@J+c!1Bb81aEGMnM^8H4zR01F+!XF^}>K-a}$kAbwJUX`r=?4~3`M)BTu8}WDs;Ut*!qa=HJL!{=kmO2&~{~u zGG3q6+v2q2u~|0G9UADL+uUQPiEjwE}F|^7Efdwp)UiRZjnRB=obmRhSbrr9@#0u@mvqg}T|MqvZ1- zPE_Jaln^K#MSYP3OKXeoh>K3Z;eK+p>k2+jkw=vRNUpCH$QZxTS7l|J=5imWND34e zOsYi0>F!FaGA&f)tNcj0eBxI1hZvqjNeGM3T%Q?7p+0H<1yja<$9(*&dL=uzmDCzr zxTaX8DqIU!4tuP+VT;rUYoZ6{dgea=4_MHiGX}gY{_C)tiT~;yH1)q%^3)dpRZ_C+ z{yrz_yK=x-ok0NYfCX!Dkgr3;rSn_7`4Q3);zrv(IW^i$su=cvhXbS=b^eZ{_6Gl% zo<7_nO0kQqeUzceEu!aS5nG6b^o}~+cDI!W$^CsIMi>0?Lgfs7@#mJUi>KP4 zFC6*uuDxkzHjr48mR-3<$&kHbB-)#(Ub)%)@gihMerevE@A6VnFi5jg)FA(>!~mDc z|AXE^O8y@o9yjuTCC_8Ze-riloo@r{5F#wGQFj4@Dxtznu<+wPi^%_ZvjUdg|9o|r zj{n_1?l$#*R`RS+{y$Q!-3Pf3(v-7p%Gs{4obBwBl#K7Qse0Q~y{)Ji&AtC?U2Yy7 zpk)6)c$K;Td2o2x?Efoy7QV2PE~e95)JgBORZi;^FM0D_G)cWSLrJI#0Gv~0sn}H^ zWJ(FC4xe^7y}eM-MIXy{#@zQ8!VvnRA{E|VXbf3j%!whps~XnMBhdXBdeC=b1SP+# ziluB%O9ONz{k4~J@5_Q_|0+0Dj3M_)@Z7;JmC@zPj;Ec(szdor{$ZgNLQ*Y{WhQE( zosK5a{h)tUnT60iLF+wn(FLJAih=X>7gg8Lv@bZi4U&N{Tsrzy{~GMX3k$@Kv@>Bl z%b;ONEPV9XOhgrvWOx%ap*}{e5dbm)N5EU#i68%%?>E z>%Kb9od0(BoBY2kc^32k6qBL$@fKtSsa}C`EizH8gBJO~#SRzcf8D+g{y*4%mCpZt z)IDs@|5ozU&HwK-T`=Sg_1|eBkedQ;FD)AM%Z*uCIvv~VCrfC%nRoyfw~kMJ6)5J>o)$= z>LvhV;uESfc*LQrm5ngNhBCnZHolo_K37zvi@I6FE;M~j5U1F=M3TO-KZ-Q;%nxd- z3p}u0*|;tqBx0iDoy^Jc+;gB==xG}6c|oSRz-Fw}HA?gnNyn=!Oi_qdWPvZ9S17-* z)MM|QoAJ!l|8jm@ApLWh{&#$Ibd<^ebHYW=16VMVHcq_Td*Pb9cMnQ#K z&oi<~+i8$T>h!t|UU-<9KV?$goczcYA%BQ8(|VpJw{%%5>cMP;#2K{HDWTi#2P1Lh zV*UgvT!1~3z#BS$Bsza|ioV6@?g>(~0Q-rF?8hgzMTP22#(O~83~`O*Swt7JGX3ub 
zlzf@y-)U)$IS)?e_k6^^=VuIvj*0?xK-h{$W2+r*fzfPQvd6Q(uX%z?4tZjelmk7l z?IXsK+XCBy*g*%hMidUA4_nb+F$g=U>_nzP$rM*IV1|Qdlm1fIwc_6w?7&_v)5UUF z%5rra<AX$5f;F*i%?Uhn;?dFp-U-Tz(4#sdPWJBJETw*U9`Gyea> z!^Z!=lBdS~KW#Wz(FSSvjc%b6T)Z|k#R_f<)*{to%FWj%N`dR75>h{=U|+>H7$YoK zhiLFsh_77D;mP8wqO#EF4Nvh|c>ia6&~=H=_H-@Iwm-uDmzW7G)u3em?;X5K)qg(d z9UmVx`~NDQXFy$hk@BaGvZ+Eqf3 zVlU$J9neY;c?*04oQA#wjt}IYcybjEhxiLVC+4ad_vezqr+T%X zSLjl{7KiT5_09=s^;&rsXyywudk*gJ+j_V;Vqc%9P@rhy>sPSdkvE<@fA`1$w1(pE zq#)XR+1lxxnXIc70^z^YX9@YQf8C3+k;l}NGz-h*|9!$|!fAa$?*8Tri75Sge|9;Tz?KkM~(bn&9k}kUy^0B13c;uAlIPT1J?MdLH-BCooxUt;Qu)| zYV!ZA{+Ga1M{c_$$G+v15hi?*-?4imeUN!@AHFL+OUWapkwCQxsyJcdl?(F@Wk z&=W(YF6x_iDB;8*-U;}4eqqXYTj|crdW+lUmY4)C1?$*4WET6kM-i*v&50u$8 zD;OUty=#?KNTjT`^B*Vnxe9wqIxq38?8a5z&&g{nJ`ND1h<+qD$QSm_3D~=ZbdQGq zp1xMUCl-MEh$DGfFj*U8kBb-6cVB0LQ$uRx`MQs(votC4td8eThg`L{ne~yHO?~-i z-u-_Jsf&D+iSeEFlGV?BQedu$tiHL!$GUxZGpt*pyYTv zmJGG{47cqnTgA@5kfAqUFyq)CMX>Bdz&!5@xEbP!skHjN)?9wYwk22EDWzv1cD-21 z`3D)~Yc`$pF{E7WruX;lK)lvwFsrm=QCp~}6!)3%Yl>G|W8_UbY`ixnXG3 z`;0@++f|>!U__ydvd!Br@(@RDRiw3b0_2x0bR2|S&OXxdQFaUr70O;}*$x1*K24%YD^f~c4`eN0fkVSIe6aSJBZWUpO zev*5v5dBRh_o4`L9pW z2s2Kn$`_sRf~y=OLbGzX*P$pNOzb{Wou9C%A%~+33>Ts2nbnx_Y7jNEdvM5P0tK`G z(b5fDyPzefE&fO0QF zzU;x^HfmuhKD8%EoyjWmNTvlcqG~+G$tS=cGV{bPcp~ z8kUw#Fk-Ti?p8d1ya*XeyqU~VSv0TZF#a?4UF?q@6!FAEmuM&&8(htRatnYgSyX_D zg@ez?;c|s@>$%moA*K;NR{^;v}_6CSU1M~%PuQX*Uar5|ao82=i?CvJ3 z#z!US?Jwu5EuSujt~b<7iTIk!y6f8My_yQP&+cw^DRuyEWo`e+!T&C{Pop-BJZG%y17 zt$IiP{YL%SnD1-y%(VY+p@&__k+C4!PWEzdg{RE_-|h8M_J8kHQ~!T8&ogbzk2u)& zkPo&!LT z-rkv&x?6>fw3JAlv%5q`3wCiqTi1uVm#2<(e3`96i|sK!koAI zGd`cZPu32NzF@}HF0J}ZK(vki6*ZAlOF%-=zd}rr3tE@vuk*{-3_yy2EIjFtI^aTT zbU+JSz{dzUgi^!;B8>;}3UGujr`<88srZeLF;hQwBvY17&-(D#f~a{O<;mIqJ7eTc z@W>|=)iXxP{@*=L`~Q1K`@O?v|6j%P3|v6Y5%n411gKR9cVpy(L5MvmaRTVvz!74d zt!Ln)kTAk5Ba}11#>n%)$Rh)FDvbTnt~~kU_!fZx^0D7M-8d@&yFiGR#TcI)rO9Sxu(G#(kXy7jWd?t-nCodLYrdfDNVVC&_7 zww{51LW)Vqz}wev*j6W?k$I z&c{N%@yKZF%=3UxI2?FLXc7JhF)*ZLBDIh(V4Na20kJ9xOetfnklbO<0|Nx0>mpZ( 
z6RD2~zUpjkJ$nZJxKNS=XnhWJyMKZ<*zfLl+uc{~-j7?)o;?F+;_a=r`Y-sy8t%-6 z0Y~)DZB^}PZ*PS8I2=gL(qg(AC!!<1AX~&D*rBvH|NBkCeWQtrcm|6x*B;IUwRp_*xX#MnF0fvNvYkGsuiO(tVJVbR}_3K^v{gpFDt}qJp=L<@{Os{v< ztmqAT!yWe;T!d^aegRP!qU<;pUf|{j1{^X8h>v{EpI-F+lt6bexfSiuf5ZoQSb>y^XSIo?l7@ zIuDhqUk~3P3-%7UL>{_@K3CMlz#XCpK#vg+2Ck$g>?cV0^3B=ncW*iqHx~HUGj6dl z1gVZ46#!jJ*0H$iS`mJXI>kO@2(BH-xreO#eb<6eZ9)ksxy4cdDctH92eDJfHxQX& zEu>D#B5?ar&uS+%x{LzZ?9K@MT{Q=B01?m=l`=`;iKSs<0dnvV3j{KSP{W?kBmT(Z zcSYb6hX6*7iLKA^HfFDJ1oC1Y}-!xd8U6qWN!gIg!PdFNYS#)((X3Sq?>a-dR=GEefES=#Vfsi>PJkE* z>_rPHgT?j8G5=nheMfHUTEba?sZ-?Zm|NG2>gOZ(Ij5Rsj|p+{a0)as!JaJ$IW`8T zE%$bT*}SJDq`+nX{j%<@19Ax}RWT|>Q_1p3S+Yu&19}+BDxoE7w_F7gNfapbi*C$T z2^k245=(~m0mWkwTrNJcl&1BoJwtLfp$QerXcnUDe_VZdA1Q$Wq6{;hrz8+RA!j3c zGgM(zOW@%N=E}gi&e{9~5n7UcSIc$FveJVa&A4>B(>?zBeR{eL$$Iz8NDgh|B?{?cc$vsuiye!zl7y zM3F0y28E6+6cj4-Axjgr)zApwSq2nELqw4u>A~h5LA1(a1$}_Nz)9dql@o$14>~s* z)X4jRne{3IP=<*LCCkv7t~_z#p^l-79$33F%)|uC2r`5aUk%~oi^$uDQH$y zgxpc$^0k`&r4FUNmJ{0*W|0h>!eT2{`)T?3=RVjo;aV&N1Af(a*7YUgNCM@N`1l~xCDjZ zWoF?(9%yW%&ZuLE{RLAEcOt7fi4Q$lPGl)?REyFYe|&>i_{F7XKwi5s+l>Ouz@QaD&jZ(q7|v^h$)zCa z$a9rRr{l8tXv0qM+nMMlRYZDwl+@*B_)7#}Y9&|7YT9A9&h?IfWB|pEOol_l z$jT`u_i4ej(hoY(Q{PR@T>~n?n}m!jRv)0K`AnoGKvI}1q{8nPf9FP%bpaz2lyqHc zJw9@Y>OI^1D7XjdLkVeo6uDQo+H$ut-&;aPG#?TnK7>R`JL{zbS1n2f&tS?5B0k4{ zXnTmGgH0JAvd}R~>?Sh7@YfEZsO|df`ovf)il)r(Fu-kfKq6Xb97Mm$H=O8*sck%5 z1)YiSJjBJ%76s{V6Zr~+M0t&wvW}BzjuFVLBOkDxGO6(}iuv1#W}7hA$|eyZ+(^L0 z3rL=h&1}M=BevP?r23K5Cf+N|B#kn79V6k2Q^?Us?N-;9$T$duMXZjI5*U+oI6wlC zjEByR*eK2JD35e5{wdWG&Y|?Yb7BT96jy&srV#iRL~hb=H1yCwq(%xlzPIkMDvz1Uy1+)Jd`DN$083zx=7bZ3Fa4gTk-Wv|=qw(gb6OHtrK$3E?pKGJ{%pBOp|f-^c1TC*KOlWBPt-`zj`dVihp zk>#14N`%@@Es|^fD7(%jh{@w+V?bRY#~~AWykr2h(+7y*FRm_F2YiwqJF%5P?+#AI zU__Y6oanWwNvv$-fi6}t;w$XRNRk*6 z2w^J3pQ3;whJ3Cpt;lC5*DJn)3}7|R}L_I@34AgNzM4*uut z-EYw>3klS6q9g+$U>JM;mACY6OoTOx7>nN0GU=Zf-61Ny-AZ{>EDe3)PbVZ~=Cyds zj1P`kHaf+?CJauYEGlyzz-T{^Qx)J~)3xo`7nqWpjJ_TZv=*W5deced1qYwrY7rO% z9M2f}f_hD#${zwL68IU5WmFy>y|3N;z`)tXTc9GgcEKRzQbY=}-80Lp-S?7XqRj~_ 
zSsHjy3MHw2+mn(sbO*HuCt@QLgJD4nN4j~g-xC*I5X!HOOJQdLVsLGDS$ya( zmc`&xqu2B?7FI_9N2s$%{AXVL?}zsvFW>(B>Eqk?^@ISI<$vAZ@22Db4|+}g=aoFq zfDF&WgHQ^}zlYdG9`+HF5xc(%dr+BzvI&Z!dR36>R<8qoA@oLl7pLmj1unWp9totB z;)ro|1gktg;;o;lzN(78H|P}-Pm`^IM>Md0ERaLhd<-Kt7PpdtPOuK&nQ zgZJmPc=vZV81OX^5lUf=W;Sdt5+QPqj3G9=Nr0vdT>E3sJ62KzEh zSf~uD$;GAep#)HCV4jTh0ozK$6Q8bkn%m$jesbi0ME-S+F0wU3ctqYrbc`=)YQ$PIDqFFu3{=QKEy?FYa3k- zwa$IS&lnU341Cq~@zHLy5B^@Miv#NJ-+B6Cb?MC*=PE34Sc#QNmyM6l(Ytt5C}KfA)wpvO z`@b$r)Z7+8LC~uu=aK9Tx%ddYm}uN9-U*ez0$oK0K41L!;m6aRsLuUM^=8&)NS#Ik47c5ZOp!f zPAzg|3q&G%!p6|&n4h?rH3rL_!xO(qxYkJt8(Y=Ql*}{O-;U5U=a6xdE=!vKvQ#&F`IjtW!4~J60C;TwE3Q8z|Skn?3aK7h9 zn#2)wLvCLetKv@*y{WWhqJhBj<+dhO@nX55ja=rGX&=w=nMZ8p$T-JGKIx(P(#x5+ z*jM6YIrE3f=o52%7;rWkR$Ad#d~~yk!ml8>{n50EA}=$S+?9T3bAxaB4k6XT8J68^2)^9U_y}03}?3&VA2vT)*Q%Ci#i@Si|lhe=&rf=iGsMKZ2w_ zypaerq>a8+Otig zPBe{n*)+Ngps+mRhf4930kK$1l5~O=243_VpBR`%J_E>pSyNITvg@qJRy{BI0Mg{D z6a>-?*+;^vBXB6^e&OaHgCU_o=(@;B7*M%_RqpD;Wi-H~p-5(c3}CFLAGaF2$BmXg z>W<~0Yb6Fn+`v@MnSkV8z;N;+ zje|}1Q_<}&wFPF>0PnC*gPNYVu+-*uEviDK&l2JCTp?kdpM#A8ye#({ z;$KUDF=cG;6UXU6>pJqTX61L2-zl37{d*wT*vmcQEttbED2~xhV-`V=i%~GHM9w}RRx#1?f#x?g~ zZYb!&5m&5pZ*Rqhmrr2|zJIG`&C9CaqMD@~OuvK3f3wg_@jLidEpkGweiMzJ%v$pu zoQbtpU6F7b_XOuumfp3HlkDt*)--+?VXUGkioQ!?oJrM+D6y`HqVo{+K;OY>#FkZD zRnDWOUSA*4aqntQ_R9EaYGpN)8~P6KA?}&W zz1a91%&-n-QZfx9aR$-JQCqJnlz5i%2}~`S4w^Y6$Jm1ZnVo%({g0 z4VXJW=~NQmB`ZX_8nb&He+TaceQM^VBE8bcnl%bdF`E+&ET7nHV`+;z! 
z;;JwkQ;UodXJAbvrTTo(`8u<#r=mL~-7N2qggc*fdd%_a)22A_Il5%i#m9pI@&N?@ zCOvNH&prg9FwNIsl zQ^rxize#V4Zi~nTqH4uWe@!)y)9>Kh`~D{qoC{a8sg;R6Bv@rG;?SnrnSYte$T=R( zbvT%%RsrM-#T6W^svhG`W3O?<3QoZ!MSS4LUh>=!6!$qfSHQ-H^hRO8XpABFtQzBm zbm;lyHg^wFEw|CQp0gFH7#7WRX4?&*OkC5iz_-W}{+lG6=HYhw#2-bY@1&RAq2PZt^&3s-1BYlzWUIKNI`Vo##DBCsdxoZOmJc#`W z!>A$XF|5Z`4KDckGzn`s37aD{D}+Nc%}UlqqP+}fNzlY=t}Kodu}capjsqEbB7gz< zD8v#8S2BVAC~c%;q4NWV^0bMbYUC(fMfE%Juj{8i30>K~f4Jnv1@9>hh4bhua1pwS z=S@*w{-g)usBq`Zp9mQ^D8&dW=Q2NLp#*8B`dSj{C$G%nOny5*10F)WS9&P8K}eqc zvTis3-~aRfQWaHKN|GWEa(^9JIU!72Iec*PT1}nHAd3|UEFiz&p6imiK|1B82qr^##0M6DC_Q9{ZI38w0=s^Ss5Ayj*8lKTkSxgbmW-a^|>_mu)y*%H~c|~n7fB0La#IgTQ}$0?4Zl~=w!5k_!b5DkQaN?_g<*il z$hoUUj09@R^1Dbvu4#g8IbKlIJyiqRJRl@-e-|AU&DuBm6iku7gH!btP@hR7;=HRc zb#u~c?6&@1latQRj|}7y#Z$&DTajx^=Ag(A8M^`E1|YFT|F_tuSc*Hb_I=X_aUa2e zPyP5%ID!uM>avxjW7a4wzLdtO%$J;>

nY@ravE+*ST8y5v;%c4qT^6b9mtc!ua_ z&**B0-x)`3C-PE2DEhg!LTHa-8kDOKmkscqKh~LCq_4LO7A1)ir4=y&Hj$fBFXZ!h zMA#%`J7bNnOisT}@qjbF#9s!HRGeUaIQMx`$k5dgKh!wtsz@( zG_P>sTFCDaM6Lny#NkaDqKo|r+i2vGfEB>~{iC%Y^j&pbp+qcwB}u=}hYAS;w$-{~ z_Y(BbBfu(oO&gqF;55q##l0&DlT$e^=w*h`j4(^W=2X|7MNsVkRn#3HM#k4iA%p*#Yv@m zDpn_*fz|NDd|@Ju$aga-{^6dM>RAZ196N?^B&dcE0Y8*?uJ?h&de8dT@3c zk4ITDo2vpJst@@T6I_&{GIl)ni%VwRPblj|DNU&ZQzXDMW+j$dZLg!1%6t7HNnh;P zaQ^&n6!5FOCL65kbe5~f$xXQ}cmcKLDnNh|hii!d_d65NUM`wJG;H{g2@vJZE)mdB zv`Gp(CRSB8^Yr}OZp9RkF~Ao04_ql5BDtM?z#|-}PX7T} zP+_@f35k@;?7Sk3c(BU=s7J)*wD3m}t22g2xRbZWt z={0w04uczn2U?IMjc#uU$5QXdX9+u&=(|Bo2dcAENl#eLAj7`aFo{4qY^G&{EJ!Cr zvBc!WU`Sd2$gF%MC>bHJh<7JefX_49v@PXbFZMw`t>r~(jnm5@Td{ky878E^%?Fy~ z7r-v~JQVy3(7N81j55hXNn~Uwg272r76wSwxw4Oa8i3CXR4dBY1m|PYpL;$p|7Oo2 zJ*9i)Fxrl!so(GO-J%X%yDrb08iRFXWQ#81#uJ=(M-1nT|~OkhrZe?awOc~VT@=@h1Y(n*ky)UVkRcsI-3mM)(0WB5p52{9#*FT^x0lHvR`4>azY!`Q;s`g+{S(M zxRulV$#}(-%1LRpHc>r!e2Lfw_{yboH`EfB;Bx%asrSZGU&O|FbdssXktexrqV7V^ z(X=vA3*Z1mG-+9dW#m|Y$==}+n}T=J!b)ur(RH;1rV}RyCvKC$bCjl58U)8|X~7Wt zcHM)1aK{bIC5s|$R8Cz%m2(66x?mF(Y^N5T5*c3c$p}XaMPzSLT(5YwsR4CHNdXGI zq3ldBDYtxys0wnEd6v82aPDKaePegf;5-ooP!el|xN5f8k|E}=p1@q43izDPBT0!p znE^YY+5N~}d00cZ(6UxMBoPgpfAcTovo_wiY%EnOmz?D@O_(Fh`8Wr>IK`#U;e7Qh zE-#|+fzsWGD=K8~tHRZFfmHC@jmpn4n=t~!v4u68BoB-t$Mz7oMqUt;EkkLU;G!G( zg_v35GF!|zb|h3sL%tda<_?M9RF$^1qX*GV+=LNc@-iaFY*Lww=80e)J8oCMs;Sp~ z6jD@|_ty0~mF@_UIZ;k=!W467=MxFg&d!O{ZRr-^^5#TFerE`icg44Q-S+S5>{Y#P zqh2_WgY5!I3o4#0>g%nj@1$if0y{f7724U+O$zeH6U%aEC$~p-cGQ5%idM)`D7^S3 zml|*=OY@9l>fiv=b^gAjvf?H4?8EJZGsw~M?aHW7zew2IAT{G6SPG~ z)>slziLIIoa$ogQi7pT?O4#*O zPpYB<)rW`Is!&n77^x>aJK%}{bc?J|8r{Pl86#!(EiL&e^2LpZbb5(7-kF?{FHi9e zaIX5kk9_c1Q)6HJaC#O4j-)9(2l7!2x~8}gslzZBy8QqDZ2CQDHMc93PD3N*nKbg% zULUjOB;A_QU3Pu^%qtS$*z%c^-}OOv)5 zZM;rO=9w%aL7qnUPU>Doxw%DdZNjrls^i%SCv~|J`fW2zpYN;0tDbHj`tbF2(csv?3=hZ}Ci!VU-~mApmDegY zq1%;~cA)PgI-Ia^C9E*`B^JC!fJXP{Qu=C%ApHq>jhN)3A%ZUH#rQP$YuKOjCFM3-kg0H-|8SjZQ#+r_o2OyG}82lIx|OkS<@cY5=LCL$nzA+Fd1^t@~Q-(%@^26 
z|C2N%Q?b*aW~y+d?qys8DCSBN4&rza5$$lxc5r}E8~}>11d3yhAr+{=fZ7Y#sF0^9p_Ee-L}YfC zG1>F-O!l}3ffAS8mFoiAh}b}pN-8p>@pw-p+|*cln{fT}4W`4`KSw)&i-nDZ;&2~e z>ly_#;ib$#(Pt*0rF4gw8^SgBk2~TPqc}(1eaZ_Rr+ILAu)nPgTJT_4chDF)Kq>xN zo6m&gd74*IC~=)SHl}CczOX#S?qrY1Mx{~*pNNg>N_|@6MmlwFKUkLrgeNL~yhiBh zBW{o&Hr@mkYIKzx`RLm+#YrTneu2DSrfM8nERYm z`zsSRUh$dlainTGbB`0V_ixL=@eg@8k>`ozJ+eBvs35o^VsWH~!LBYl4PvG|h1fcg z&PriLJqlwo-lQ`1;-=@)=SKz!RyM@JjX0n2mc-`@APyFkD~Hmd(FnLkNYslAYhcR|IN2vkULuhA>I zvfR`uEKmProo{0*LlFW=ye;-*3p!4U{5;x8z6mTA`QgOAyUJlj)LFwH(8H2Q3TaZIw z_Q47GL|kzOWnJ=Vj?&dPzh1mQ0ZkhABG((*?C-Zb`}R?%-Z|)W>U-9HtA1p6_UpU- z!yep4$I#mAHC6xpopv~Fq z-KSS~9y$TlsyKdgxd>|#TOK!{{fq0|odB-YuK>QaTR6B&oMlcx`zKrhV4`mA1iaOeREooh((Q%z!A38*b94?QncFwp+gU*+~;JWAtq)4JX6 z)#pgvl11=U?nKvF%sk{%=f(7KF(tpn3|saDF5^-6$c>A9I>eq@Y0rNX zC;6eUxmWQ&VYE^ybIi_9&X4;n9zp-_!R8!C6vk;f6aRq^vCv~3^Jm@!6}aB89U0s_9ZJBN){ zqt$4iv|F9M<8LxHeI72}*%4eWJ3Fxny@==}_bXwR9aloy`tW0?eE0))?Nd>&Z_*wz zvNikG;x%chnj{s9Do6`A5NcE^A~=B-xX7$QtWY15V0jB zpg~g)LrnalcXg5Lx5TEI&tR5R#sXwBL^#sKGI4EEYme576Y#w**Pu8l%Z}&PkSVW^ z_*s11hK@IcZEcN?#!Zq)ge7!y^_-w@m^FglFa1Q zSL;Ewnr0r!)lb)kv)+E}Xh-Ik{^i8~f}U4jX$-I_@gMj0Tb*?L$9AV<;(u-8m&uSEY5^5jbI>7CgsxTD zUvAeDwLYCdw=;kLHq?B%Y*-I}D!N5qn(G@Zg@Bsz7crn3c{&=+;`%NS)l^Pw*>U3J6}OvL zczWEg+3CcRLB2p!4uI)F&ZZQR6Vi-jJ{J*a~7}qsJF)Cahw7)N|Ki+a~}2#=j|X7bs&Q{%^Ni2ZyQn@11t5wP*1ECVux70IJc{Uq zCmdk+z0KZxIhl(0pJcVPVtImx3IJbgqDjitJ=-W%zT5!g%(0#OSarfr=@*hifxX*r zbQ*10iCTVfdCkRz4Ry0k%rCdqXiYBHgD!H?MT)z2w#Q|Qyv%u5P-N8Kv7Z;~a8ZY< zgJ}?Te_S&Fs=W|9!F!fnyB0sydgugH>oG2=*Q;7G;W1i05&Vns^C7l#o??r*XT%{s zH#Hpe;8v>!I_-81boTdapuMeUt-9beZVvcg;w(({bg@h&BC5)F54CJ#lg;|;ePS{_ z%+fM+>+$N-aI__jKDPLh+?C68HzvR83Xs@!APu@catO4$G~~+Te&}?$vrhm=UY8<^ zxHi34o8({Ke5r=+wM%Z?FV&i!p#v#>^Pex(zos94sn#NGA5Red^`-jodACKsZgbDo zt@MY9PM31yw^b7b#|3tLApVyWoP$T0jkR{Gl_O4}*#sB7Hxh!RLvq8%q5c^S5I0QS zZ6$>c2me5KYu!!iq=2=XasLn zYm;MhgkANbl^T#alOY)Thz^Nk3;O*@DLz8~8u5N)gCo5zRR=z{&k0S|a#%e9yIN+f z3J2QXx2am*vA;7}^R1=RivM;4{;>~dtKNx$lHQd;Z=9|>Rj*m~mf 
z6LU3@=8Bb^>O7CoppS2pkV-y$N8I2N|H-r5Z~aYs&%NDJ-|%OJpYY~?gx-X1@B3uL zzGL-Ec2FQHUj5WOO}!w|2NU{^deFaiu$!7oee5{N9bva!1MT)v4YZGr*`#hCCFXsf zm6O>`S;l%cB9{(Z9*%dn)4M~_JmM?x8F9i<8r}+hRdKhe-JCCwTWn9qJ`7Otj;<^K z?zLwgu~S>}YBuf8=&jLuvBM@7Jq9AW6!{p@WD+&w-$5(j{o78w_EBFz`_!nA z8}8nOArUQaG}YT|XO(-ZY$mvnbX>xqL`&;KmkllC2AR!A?Ynuvm``_oq8#ZHg}5u_c)tdYkJ9(=$MV&Z0wUa_D*}ojeXZ_>}F#(8~gN) zofDrdXvjCuR$sh@gEwzLYncmyWY0eC=KTRu74r+cgV@n0)3e^d3(w`y!H}M*&GtSX zyFKV<_7HB&`T@BE&cWGkC7z?aOWu`94mEZL&MZS63% zttIs1d3J5PnVCLBcrfGw>$FGU zp~UAsshX2|^%hYpo1H+Y*?I{g!=eS6E%P)Z4eX*DS+mY0Kr+h~E$^4$APaGMlt3Ta zIAr7781i80NdUk-6UGuWdY{=IBO zp@Cmy?XI7|=mk>2;&RpyI)py5bCxi%P<7JFO;^DMD;$M_#4zLhF%#V2_W5z$Z{aOY zZC|~Rvs0$JgEdJmNvy~86;z&FQmONo=xFWy{54bkJ9s&u@hw;XAybvK`yT6506tz{c1mNhTLUk&XGaRFy+rbpm9{|ik|5d z;Fs9-K^6Sns?#I&Sq_ooX$ePPqK?B)xvS*J@>xXufM&vdoAo6W~ndIuUWU z)`B=QCK_4DDUju1iI8QEvz-K%84UTj$da|uph_?qRhAARmdSJ>9}iA4z0fgY$?Py@ z2e)gv#9+H&H{H7hg^Qt-GP~U6 z-lgggvSXB!&kaTbp<@J7!B>~!KT(&4qYazQtj`H&6=3TESqP64q!cSv6Bg`hN`Q4R zqX-2edRj7^paou?056j(*Brw8p$u5~lc*wi-~5F~SEZr9UJY++G1U7)G`x?bIW_v- zd2nz7IvM@$n)SQW0C?H}J_msFvJN+`-Zd~gerRC!GB6vC2eW#=`-lw$H&AN1-d%t; zY^c5sN{hC6Up+^a=II#m7_=XG&B3p%=4HI7e@0M^&#Wf^)?U2Ya&>U6EI4epj5w^F zra+pg2~8as;p*v65T z<%R2lRrx~YzHre9_i=zNm~y6(i)6;vLORG~o(rFYK`_aIV0mPBcK+)kur6ceW6Uqp z)t?#hCA<%T4y39U@iSoXC1Sp0JvRsWEbbVV+AL;gu!AXzTA;)&Ltsf$%4VCh|8B%CK% zE+HNpA!dY_mDSsF$m-b0YPzg`EQ74xe|pIxnznDf+Z5pVVXhs`h%85BS4q9dj5BJ8 zZ5z3(f$+cldSxV|UKCI(rKKh(+E1%p3zO5Sl4|GdI`1K-0U7vk)cqrbZh#%M)oQTb zoY~i^s}|)*6D|~d_wtj!y!ld1Dzr;LJm8?3@_m|MZ^|#65-(@q8<>~6XAZBdNN0XF zea=o4@Uanaxdr@oSpgp#0Y9dIZA1g?s!sLC*h0m^{&99;&rbCcTkw}zic@`LS1Z6Q zv|Pj7eYaw9radyAnLIbUu>Q2zODHGPc1wJ)Sa~nb1dNx@+qwUc)@Yqjsyamoh#&n*WMz zjn`%Q0cvKxFUy_==RDXr0o*}dwWHe&8p0X zB746tzNu1ch(_=?FA0hI&v9{(Pr@f9lq@8#=K)0zCOW(j$ z{nC4F!Z^uMtnt;xj-F3!Nt}zPPSZPDgr)x4?ZO1vu#Ny`15x>1k6p_NZFHJT!L7vd zNlj@zvr*^-IL8D_Nz2XqXqu#WkzHYTHAAs*cs< z+e~BFv;4$z6p7YqFG7m0L`JzV?l})FJw7NqJkB%1(g)AA4z&AX%jv`zeL+_CW-K?sZB5nI$E<9pDP-w5kunA<5WwRIbb}*%K0S+ 
zZ>JoCm;d)`MUEWdq7WhHC|(P>skfD_wF=2XZmNmTJZxQay^;J_c1c&Aj`VY(g8ls8 zx~EQcSO23kivPW@{kyIIvD?z$Xcple?cZ%%%#p9E4eTtZ4ZNwP=ougA8SABV_VhnA zx3fcAudk|0qWk_N+&QP(^T`dBYq`d9^V9oJ{|7WdbWlq!`qhx@0X3(k>Nq}1RURt; zwW6O7U2BN^`iQtVAU<{n%|3PzZOU0u=QU``$ygsk*LJWwsC&rg2Wi(rjek?(PAa3- zYPAmb_V|BWtyb#42c1@X@4wo6t-ZbdgU-R>?tis9`+K{s{{pR5HFDA~qyhB*tF?4l z>Bc>gpPbv>E_S^zq3XGx4SJduK*l~QXsG5J)`u?C3lj4&=Y{H=pR%vwDO){g95k}-+Brpg&_zye!J4;P zve@JctUAJR0qXT6%0h0gXLbyjvKM<6agzbZS~bvVw`-uYzh49CKp?7XPUGf*{{>DRpu=^zP4p@s{p3R%i(`Y{#=INH z?44sySb^-lV=6{t@+2WlEiZ-YJKLjtOdRWV&S5)<7!9%p4ut^)ig*o*m<@`&XNw65 zbkEup-G|N+0_Fo!~q&3wxe+~e*MiKu6U}4KKl>vw>GT!_L z=r>9#0grYp{@7;!+5X~Y?)?g_cXBR`8vB`B!yKEs#9*1f?~Z5ayeHUo$Hjg3!|M9( z_?Zs8LE7(#05TQ zALrA1h$2J{`aT))Cn-?=cu+g_Xi2QSXRx+>Zm{+f#@f;Xd_$P0kfC)7BARU8^+d7y zX1o5`f%Wos-%~^Dzp5QN+Pn==H$XkN&IYKL7N|eA-uh^9EWJA&OHbnI-4NC;4t#5X z_=^GJF=FvxFkF#>(ITO{9AcRVu*?mn@9zO?Y}#9*{J=)i0)jDd!VzLpJBPv00Gss! z8z24Q8E|93P2$aJwUFEWyMX!8Q35B6?_dFB4? zZ8X~{fFt4#0Cb%AS))w)s3=;({P3!sh<$1B)oSq-fE4_Rd}46cN^w>~F9jB*=ZjTQ z5nxK-B>~z37=#i^ecr3@e{JkAB*ih*_BLpOjnzdKX^>hTQj7S*cpyIxsSQ5c3_g>q z66ftH72+{S?Rg=!B28E@M0^z{ef^{+)ZwL5ge?WtItHq(4XPQ4_LvZ@NVlaiwWbh} zfkU&HIdMgmMO+$=kZll=(O4NAl*;}I;GoA=FGU2wi(r-+9CZz5F_=ZZE`h<*M(vzx zaf(ROq$!$eg?DzKcgL|+O@6mI!zgfnuj=~ba8XK4WQ?Vvb{S(S1C7$z8)&3GD$wUF zfsKxj4Ky;)DDKKKL!$^7@tc)7td&%}9WVzLionkH7k3+IYNRGe)MMUia7?y%MWT9S zp;l2erOkSR&Y-TE+ar|y!8?7P_(~azw1^i~cD2rvI;%L;m8*22EEGa|Ez0dE2R%9L zdOAiv9B8xytg8)n-3)fMk=45kkb$oTz9!y0DfpVz?Co|IN3RFd(W_wv5eWP05TS~|*_9%oI9p^%NWHkFe*8);-}%(WQ^Hvt)r7b(x+E{< zVx5f;Qye2^Vw9MfM7*Mg&p9IKHTHh>oy$Ad$_K=mfG^WRz7z)pDhT^BA@ECK=$9hq z_(TYpbPyOlBuXLTVy2avEao6tOtMrxVoWk-OfqVW9ycaWV0qDF^5VxRQi`7yEADw5m_>agW6A9O z^saaP3;kELCFl`l*x-5WSS04R`CnoDANitbIUx;@ul}yfzw0is5uF+HKX*)*URQ> zW#g9SX=L;Du~W3M6EvzaTvvQ%tsXYo8oO?hzO_KxTC8iGsA(s5>mu9Oml{^RWe%S$$#h zvIJRpBu;Y4bMkag)Iz0PkxEW3bk%lBlfW0cYAbah&r!t{sb(fA4;QUvR>hmEG|aDO zW?E#Gg+;2uLPg<&eqJ;HVu}uY1Os+-N#zcr9lYAR;{haV>9?__p!$J8`}6+jE7;OI zbGw@O$05+(2fth}6x3xi 
zmxG!ug>vW}4bZ4711u81`$vdE)OC1m+00uNswtYu(*aJwj+5Kc_M$Cquehb{SzG$U zqAgumsO?otwLP;~&q{(@S+&7w>K@;K);U0H>F}Pxr3RNix?-z+umXg)XAqu2c(ZA? zB1AAkBjVq6Kcdm;7-9$Z9Mt7z-3%lsSU_mk8{ScDK}UVi_mNDP0tCVuFr7IM37XB= z%4VFK=L^t?edNtIN-?k`zA#EL13nD+h;m=#R_$;B?6GgKhru4RVUMi{A8^YRp>l}v z#J!Ei*g}8#&zGuCNYJO>e}=woV|x7?=cZ`<{Y&*h2N4b#Z~(!Q==lgL3zpATI*0F{ zPwW5sdeZp&_Lu6z1OTH|P zxl8p5%L8quTYhIn&3}RS=veE0^B04$)`YQsNO8QGYpx0PH0bIXqpQp{*CO!h(7-DL zujYkUE(&gl&j6Bz@ECQG?@KJ8sdF6ieT;%(W9Pd~tZU>qcE0x==(@;h?0mPNgV}$2 zeq(1QGj4`SqDm-S;-aAILB5r#y4YP#y-M$0Z2jdwuNym1Q*J3f26;I$pmyU2y zirGWLphrTtoU`Xj_S9U{=WJEe$M%W$*y4%ij*Y1WeH+>3l8~A|Z+*h{n%N}uc4afD z3?hnVP)S7;O`^GE^yA88RFYkYA~z`G4it%JjxFe2Npv2cgsj1p5Bq&=8P&(Su9uAJ z!>B%t>LbUf^|$K8)&PQPoF;YqX1y)uEcq4a{?eFe_7XY+ek{Sf5b_Flp}4 z%pKZu2a*+;ZI;dSKs?THnI@SO^GHi0>Z+K@DZzo5WI!B7+7q`+m|$bnsSb4pLY?L?A~{U9RFMbD{)$4g2?{sR1ZRT84QzcPHlkuZ zR5XFPL~nIrhNn>m?p6$8;4}Cw%KtYT^Pl)l;s4v)KR8VB|LyL!_6`5vCVqzhudMuk zMmJ(;{fug)?5dHC()vwNKJWoD<8irk9y*4>FHvFRpz|V*zqJiZ6*l{IaJaoJ1%=^oGYX35pr9}eY(_z07}!<@JJyOZ49VKcF4yMM zA(R2oncF6F+w|PQoZ(S>L>{$?{AuN8PMgaAH6$Yxk$*|BUS|(yX8@bb|8=<6-plZR z?d^38|JNpd)A_%0>9C^JX)ZpjD9MbBSmilR79J9JQ-{RP-jq{f*8PxoO*;!;su?74 zgB**DkXWkH=XVQQLv)2lh=dtPWD$q>KS1kx;FFLaGLz>J{{A-x6w*gHOZk+<5JB*; z?YC;6eb}jiz5N0_|TU_-X@;=KBtpQ_Q1yV6KT8TxHSV94p6cZBY3N%({8nrM|<_6 zH5Ca)(E9_!KAH2dp5i0)uaQra2xf-qus^5HBu8a}@L@5}T}QN0!7; z$Mh3Qw>`Dq$S(Cf><+HP)N1EFpMZ=N)gTAgARr(>w<&-*Rsx%fQ&na@bO%#1>r4QX zBY6832Jl=dJG7CK)wx*2b&(ZlwC_iaf3_zC(m-f^*8Ok zkFwwJXN8~e=6{6VB<2DIICYH?e-ns`S3fmRQ!hyL!Gyk}9`vsrp`y`-bRRoTl74cx zT?6g*Q4O?@j@g3MK1!@<{m{u%p0S+ud_*=KwuBzRS-JjPrQ5wt$=r8yGYHGJcI?v;}ccM^u9Ji|3&A6DJPVUVVy8{Q&0BS2apSAjUKhnoLe>-`+6IT7|oBW~P%3mU;j&+{! 
zlU#F@F4;heB&0Ctp@irm?esD)de}4QL1kvpgFz3Ap$8g{a$rLt+sXHts7^G`<4JjI z)F~$3RiJ#t%6u( zl0ok32v@RYFesGYfH`Sc=5D|u^elM|*xo4Q8lk|)R@QiB15j)NeTxBN(}7sdD}%(= zh{RmtqIJNqLX@>W9G1R$op$khJPC$nKv=tNP?*Z>F;Ez=h_7uCd01@MV6oC;F$OeN zbkp>g^i|Un)#?;oE>V4k%mp6y4R|o%!GMQVf``53z(dD?2bGxt4+cCe0UoZy9_n(q zz&62HWcu$WSzWq^jttaL!b z*o8a^paCZ96Lcu?bW{p*x1_ygR9s!RCW^ZU3j`1D9-QD3+}+(>Lt()Q?hYZiyB6*q z+}#N-g;X`~ch2qp>%Zf80Mc#@=J;lwE60c_yzeym7N%!^U`>cJPI;10w&CC(y3h zFC}B;@1&>5qbHE)Wz^S^tJ7mGfyF$GzPw$rW1bc!B2?kGN&q33`3x2vGSMlM%}*PB zuc8SwA`a{0gqfK=?<7I$&~PNffAfpFWnax60%4;!Hdcc&+_Z+3xL}B-plBbPMILZ!!>uJ)Pb_vw9~T4fah-y z)BO&vK%XM7XUznf?!_|0B8lj$=!PkM6{Fyj9AsZFV|n+GADXKTZugsSLdq#nn3(W6 z+W4E!J3o6<1D|qC(DO35{eQ;4*wlkuRYRSsQ!`Y6&DN|4oeoHy#j)}P@J=MKn`qv8 zNS#f(#)yJKr`1+JVT#ok>678^^g<#M;R&7jKD5TzyPboKb_2P>q-Y`I>)-pf=SpJb;ql(o3T(!y`TU-_9DH6|?fN^# zb{Vm~aeU?Znk<6MrS{2toU~&!*SE>>awYyzHRR zmkbCU0m^4)K8@hS^Ct*M&a{MOye9(;o+b`ig-k^K#%GZU7lQeoWr?o6Y!f35$J92D z3h&RQ(Eago;uCbmt!hkvWExr)-KyxiC{p?JDc+Zh1U4uJ>*Ha%nL~od=&bK3Z17mG zG1=I)VKbP4lfOx?<0pC4Q-lfo0*miP&~FASrmuAfUw0|U5C$Oa+-NXyxm;PWpV0aZ z`zecl6BJi`$Y8l#@r3O>vbZCcls(M6`ClNi5YHrB1nT(UOsTBtAV2t!??jdWm_+qpb zZPSuH6re)8M+oFEOF_Xz!H7Q3G74l8^q_nv9I*H;U~&jn{N92tnDJuD2j9HAMcT4u0|MN)+naJynnwfdS zPxI;eAj@L%))(q}Xnff71{6WYv56`lj!~Yg1ZleQ`|4H*dESpoR9G0MZt@?(V<*%0 zZo(B3JavsIqqWRKWR%v)0@w*RBVQ~ij40?NTFilpU ztg99&vFXSa4yQ9oSy)!r8dHw4Y%A4=8+-< zczCZorfGTA2U~th%}q<>qRvs&gqw%fUspel<6f@5CIZW@fi-c3fF%{<6_2##h$+%n zKik*KjSxjURy;_Do3rsQz-ix0gd5zVFM$&U`;U2c|0}l#yIPI(Rhi*Zi|GDgtL0#O z{=Aw!Ht$LJY1}tRHs4WO`+>6Z z-`(jeK)E?+U}&$skTO1TQ69_Gv}ir{A%p36Eo>2QE~DN&kR=5kU^T_*;1R2$%yPw% zQVywvr}X2J@I2>^7}~N4ONu~*^S36i;SG_0uA+Yn|MMzpXRGne|2lpm{}ePJ9uTN9 z_KSfO7sfY4Aq3d?15CN?6a}3E4M2FMU+<@gi}l?x4&3QeCQ{dC?Xpao5VR)!K0} zX+IeiP#*ReaU``p<`}(s<~>Fw3`v5&X6Y%X_m*{mNU6WMkk_bO$bPT7wqwNQ9EZi+ z*GBw!b;?&x4>$BGxQBW=rCKttQQHe38ebUe@wi^hyt>gzp6~ViXU;fB7n+oc+Yk*! 
zU)3iCPOdn_0D+Xq`|0?K?K`4(D-{1a@9|82zt1HUb<~yrE;b6uYSV~yXb2Lp@-p{K zA0K&2J<_iGWSk4nH#dZLc}7iT^VQh8&GLc5lV^F%tFv{5$cM*>>UQmjVrl5F@xsLy zf|aSo_!jD*YJyras0BD!19W$0Ng;Ii??W@(-$UAv+=bX?7DSi$^3QNRveX@au-siP z+E#eD1R$^&*Y?~WR>nI7OXZz#DZ@Qt2=cbWVZ7` zTe}2T_;KiQgZwk-RFdi!pZ`|cohR?#i)pXD_g~b-Kj)W9O5rA*i<9_of}M*?=Ox`Z z(|7e9g*7W6e~-V1)LMOn5ZA80A=#2o_gqisCp(;bJdWEdNiM`jIYm! zK&}11UC5BX8b0pC1dbsX@*(YL{NIYiVPRrno1ug9*U6oI5YH_Ps^$NhburYOCRWtP z5I4CL<(k4;K*d0_B|*oy1oI#befg^7930SVI88k)GBoLPAT!d+mqv6>$a^&G>c-;f zH1&n&QV4zcu1m?K{_Jyt|0~`kea8* zKe3Rv^|-E~uglrKyj3mXq^VJkWN+A%L=!ToMzHvG1V<=cK&w0vLBTS*_>!Z`Ug}FG zXA1uD()OjYQ33khflr>W+B#NQPz$TfVY1vN=*p_XqQwMfu^su5yVde zF7ZZWUkNbcA;r3|_QQU+#kN$_6kb&C(Q6~&l#{#dXLMFqsT%go=U3z00pGHnLyyb# zz*!@nxt7y5r|tR;-}m{JgK?|$4+$ub%ZxzRf*uWP5(g_ajgFkZ+FWLSeS&U911A)Z#mTO@JLYYR-5zz4?a%wZ2?w~doYay zp9>nohK$B4kLvBqm+(Jt&o{p2!wY3Yn$WuGQZ# z(fHEA>vRHRe%PkkQZbvxNxraQB<-Yi9cUCk8s)1f>ctUP9`RiqCLV6zKR}&<7T)yc zmCP(mRjOiD`Nd3qJV8FC;wzrVNEwp_GcPRTU+k0Dj!g`Z z4lW0Ty4Q>peKT4SKC}Lc>93TVl(rEPwA(kjQTE<1-GX6z<05^eHwL)m%@1@(agorF zn^%c%$3RtZ>J=n|4`wALV&qxdP?GHhy#2*$_z+WD@mLX*E}cTRwf=@Wj}gx!eh1gP7O5R~J}0cUD~K$Wr0qNZ-aguIsNI z^9RMd*UI0l!S?4m6@LU>TTf-G}xD$^@|BfGc`FaSmo5gqQ&rR>z+J=N|k+5VL>2QFT&~I1H_zN^fa$r;A3tOpX4XR) znGqn$PiRLc571iP=I{LY15`KK_Jl6V)m2iI7JTu!1#~gV4eWBiUHdVa_c+I53q}~+ zGBgBV>_R(h;yjI=A}bD$SlRvTtwuFon(GyYRWgDTC9{+lFo7r^L?z;|tHxOS1 z-(Aypu8d|fvSal%j7w6E?`MmWj(@ogs`6gZRdFs=N8Eh+f)wA8D{P)oBz-ZkrGsvH zGDtQSy1U5>U_W+HIKDuP6BZ`rXr0_{t~WSkhFQlzcjoMIcn9Gw+zxv}GkFieQy~C# z2P4+0j#6w5(-HD`cCQVzO9hx6`E3PNM`sUBd7<*Bzm**EdeYbg{w_cXGc0VUhuiy? 
znHX5cuv+Tzhw=_v0BFinr*&L|JoR~@TnnB4XCImcrYI-*=#^+DCUBxTGc%OrvuF3S z#dF>2QF30yV<9|}ibNGF%X^k)z{t5P>ZQEs2JIfVj1S2I(Zx@U-Fd_tC z6l1mDN05;N*HG)syz16yvCEXFI^Nw&-m?l=<+9cCB+%C-u#7IJU&nG0T+p9D0Q3bb=!K(qu*ll`R8Ah0mEZKj+;YQAz@zG&NB>PA z{F8_kEjSrC`kLn#p0g2x3$ZHs-fHzE!h6m6nrCqU;d|b{O{IC!sxvvy=-CIf3W=->$bvZD=&Uh~N)fDZ-;omK1>e{g+MDEfJCkznMM>1OIUd^ayg!AR1LVHpw;HqjF{X)p|p8?ivSv^4h;>Ofz&=&r48Um92F52UT| zvz+O$Mm(g3`>L6@`8-rx475=Mhc3vwCHgvHbTq)22;-j}z!a{30Uey0XYBT5_l7yP zm9?1^4pAb@bRM0ly~OoIfE$h)(71jboshRJ*M`{8n!X8B54OJwh4&vUc(Lv=&g zXdQCGUT-Yu-OxbADu;rI7C$L!xW+z-8-gN4EviZfoYjRh{SOyflA*aW*F@Z29@n9Z z3JST@lf$B1*%{=nIpTuml$C_XoERWQ(&y>mSt`#BN=K-=_mnzD?0sDDu;-bx$RMP3 z>68#W9Q%p}*?R_&xdi&}m^;I)B3=|t^#+u5wRF4(=%Ut(4^_1R<1Bwgj+=Ua=Cr~s zTJcrQtg_~C3zn%+>gb)J{klyqAp7~F(#tn@R|lNRhaOGSHeCzzWbSg8^v6e-_Zm8IrDl-V!z^4MvK>ZtZ)x0pAid+*JoZ;%a_zkL~k`Ic2JmYERQU;eG1}bD7M(u@pQQ z#1eN+*}#Gjb%d};&IOwk)4s1wx7`}rnTaE+l2%jqp*(-$HGF7Tf;}wI;Xw5Fv0OIh z%J48E!Z6{d+ovy<)bn*i6^l?AgYyeXUF>8^3AgVxnIbo~q;L<_{DVl1JhN(+ZDVYC z3yD~Xwwtd$F~(w%RRu7*maj+H3WnzeK2eii$`U}}?8;K_zT^BYP5GqW z)mNd!Y4vKf9Zu0H?QMbT-9HQg|5($+SejbE-3*D()6r-arOyaqlaWE}TQ!xm=~YJdY1{n{|SNeV~u~run1-4~onKc-*Zu$ojN zy`#cda3^S5qv7bJx<+>uK{7f=_3$T&5>i89N>6UeN6WL!A8jqz>?W&|s7|Kin@S}< zee)^6?G`v8_gwf!a|=8tC3WYg2d6#s2JHY=My9WTf*xRkA{ee@*3&W-XkiDuZ8Np* z%Fw+7AD8yzv;hmg#Z;@oIaN8`)M9V#+z*#>I+2>GCYtQu7+B>-ozm6iYL03$ZdKd4 z!vv`VY^UU>cRKG9N7NRB$k3(78h+4)Q4NyghA5*kM+MQ{Sn#R06aONkN5veI^N-0D zoRzgnSJ2$!yVEa~9~;uBr*us$QIM)9dPtz1;;Eatg)FBhuMo$t;%hLP|c-XFUote7==`PntH-IhNP zL~_QRnyAhBL;vB7qGDJ1=|)IOFwWkuUPsR^g@!w=n+R0hSHk;c(ikD{tC;EDZ>{2M z;X9cPHEw2X9Kk$+?3_c}^D&({s&}MQ5O0uAh#QS_`ud7}Cz_&Dn=hC>lmMZJ5DGR5 zDrTpP4e5~GV;S{EfvI>@M?OOd7RdtUu3F8mo6c`gF0^A+EHBKxLB)TmOwuS3-i%qz zo7zGI-K3;cXtB!9DCS^~&!driDqSA0J4*8Qz}-En(A-vO`l}|#(nI4YyuHv55q6*H z_Rx=ilhTTd*w<;L#BX=Gxx!!u>8#~`npbqa)b{yIvpZghn@SlZriy*Hs zry=7Gf}jhutqHE~kKnlKIO}6LJ^1m8C{Kc#e%QQF!=>R6lUh9L@yhm$CQoAR&+Z}c zPb#MPxDICNAt>*G(zEoM&BBl^qjDp!W}tf30l92hit{!bHjAJohagW`-L>G5I=*Fk 
z0(bw2s<)j(ksZvYpC_zR@bVnGAlRt?MZM?ct|J@9_$+(*-MsM*8vpC=i@mb^mB^&n z+k5`cQ{KZ`I6b&zqaU^((wLC91K@a7l5%i(%8OYg5~S>UYB&hgO`jL|3e(hy;ipPV z{9K}dq<$sBd|AUGn-~VN>Eg)7^|hXoX$ZSJsSAB#laJ*tms9f8?{<5pJ3-JfnnzNBBgeeL z`?E4{immy_QyTFN^U|zcdgYOoJ34m-M##aHh>rL%V&jQX5$7&~u*;`?Kq$*6a-h%9 zCima%k&L34(O0Fa#a_ouNLjHk&tA5=Q{tfK<@Q5(-0yN+$>9MR-|haQJ&!fnTLl91&xs zeiS6W!fjQenByS>$b>D7Gq;5zda<5G8gOxiXx*j&E?jwT(CHiyOx+j16A-|8hT|6S0jRZI)tU+=)|jvKI)Q7eZWvTS@f%7F zs#iowB%l;kK{C*OW*Ea1d6RNHNEqfl-Fev!3mzI;2u@Ijz8=~W7Ur+aF|3c2bfZey zz~CPCCvx%~LQlke>0pXHk85)?K%@=)JLoLzJ|)==|C9N^)=xth#8#X8soRhlGT5J@j6?bh!Di z+vsSLi6MFEU0HAOe>pEI^5kSWTL|}j6ZgyGK7R)tLhkvFrK(AW#1tONV>d~-Hd1r_ z+&*AQ!W|l9>uXxj_I}d;!37oH#Qba@?MN%UUbvQ`RaPUp1P?btqx+tTfPg3-2rH_; zjV%K8_?S;2Y9(H!^9t%0hwZ%N%m||RL?tR@Zt{-TI@>dhzr|^rqGgQAt1PaOj3Ilx z9N0QVawVRI7Ys9=if$pcU&B;a4>cXcqDpqHnr}fdHjI%bRDpz@gLBX@DQB_J-un~w zy^K<7cA+RSIWCvQ*$P40$X88KfA**dD$o-hW9-wrX=jQgDWkO+b#P72VmqGHeZQOV z%W-ItY#rXHi(e2zIs%xaKM7A*VVRuNXTYMX_aKD+n1TmxpD5aX#iH`-S_<7ga;X&L z=glrj{v^ucdz|hYYj~_}04B68Xhz&qvRi~U;62?cykpq+4_2!Lm{9Mys5Ev&QZrV{ zN19V^9F2VGkoOy2=EyFbIuust&t`RU&0n~R98OR(M4p@ji)?L8<)BH4Gi;&(r$Wml zlvsxLd^{jv>ZsU6R6-#tT|hUe$Kozjlm-yYr*vHS_T?m(2R z$i55LOgs?X92N%wcTI+7k2)}m;wC}Ug$LFKJ>EFtmG~RBGgcP=7iC+`+;gq1og&o- z%seuQlZ~8VYjMNnoG$Dfc?w&{jb6SWDZfANt zCEuHpq@$H^eFCxZ@eC-_!EB5lRo^fG3RkBfE=0f>J;8%nu8#!}#ZtrGzt##!n}9Gs zfD-+OiKYw~}~tGnxPF!=5(cc)1+O+MXBZ+hqBF4JQ^ z;Uoh}`2M&qj%GcU_C4&fmPdPLzbG11UMeoBPO-Z^u5?v!;g(5eN@X-<)DOAaBoy>lz4bpIGK&^fDqwoP?Iw-7!i~W9ld?I(LF>(YYN|X#oLU%HZu%`6dg_r2 z2kJOo8Wyfd-#P`j3FIMCWC+iG?Z=QG&Sb;dG;G;wZ~=Uh76_ZWvxXXg$E;|6z5IbV@HiRQT~(d3-b zCnH}|g+$+k>zCWEKeQ#|8OKEq-qxnI0K2lTJB> zIP$Lg-D~1;P30r{E%!o8HD1TnhW}D*y8eK%FXAICgxfRp?$_>)hLK1*H8o@or_6OV zE%Yyg4B&|>*2^Z$G4NDxpFqpRt{r!A0Fp-~`fSQ@JmQ4gvC5B!Xzx>U`D?IGGTpiq zLLB-A-k|(O63oO-Ta{XyrC8ww1&>VYprN-3aDK#AJ`kK>1Cz_t4T!b=+XfFDSd-D@Txjg&WiAR7s6izSW()!h;IsM|I z^Pb_hK&GflVPUsXEA0$* zpQ!88uiyO{qC1jia(4}MJYA&H%9l=!q#mA3tLO23>X9qemeT7F6R-yAW3y7v_t;h9 
z(|h7Ny|I0sToQ5nwBNoJB8*g%32qEvi->z6J9j2*;%H=+G~kFkly%G5qNAp+|1zx2 zzXeO3`A1P})+x9fm3#V%96p-3WO-xa)r_tNcPNKDVL+H;AZo*Yx{rP{V?e^}Fo>RC z6)spr&^qHui5PTn87q3u%$N0hz8*^0YS+($#lP5ZU%T1m`#{iot3lgP^>>&lI3yrB zMtO`flmlT;5(?C=um9m7DHOXCh?XzQ&Q{tc@UmyHdNMUZNZ3XkaW+YgL=+S94~7x# z_O0D}`OS8}H}0}glWB+I^?g~kLcv)=kdSX*Rg5qYgI>;m^wDLp@mbK1@Y3;k6`nzM z;Tq&bd*T5tqS`v3p<7|ZDqK6Hk);}|yP$j5#xMHy-ma0b>x)+WZaH)8QP0RAYbO zD?#bBoTgabOcV;#3bcS%j?MB5Z#@hjnnai^^0j#Xx)6nMf+fevcj>_4PYII2vKl7z z1rw?X7LsX>5$Pq9E?GH@8bwo)-|1;j_y9&=PZdlZ+SfvixVjL8=i)!{GotOD(qr}y z=Wie#j#XScDE(niM;9u(bk561l9rTDbl3{o4k`~Fxd z?Hl1-AT5A7SIjHW9EE2OMH zsHl~3XaHH7p@H99A>p8C`@Y_;vK=?^wid&%=sktMYY=RAA1DuxApQ_&6i)eK1(K0;cRhka>cVt^s~tEgP~skdS`OwQY#Pt7a&q;AoZ*Ea^2{u^!>} z^Dr~ai&`{+Ic~O9s)h6#c0Vt$d_F?5qggd2n#2+g*6ztq6zG`jRX~&YJ z%(AT94KAZoYw>2H3u9zoi;W8>%v1IDD;QaNYCn1FE5B14N#q!fV<+lqX6x6CruOIJ zse#o~!Dop2&vuR1#CQn(#9+PzT9362vmzyg?HZ`*61A(0AKR}o`9~W_-~Y$ zvmVxK!t(dygBl#rh`}Ds>7ApL!V97sqCeNp80;pONh`x~e^1nf21E0zl^t57pT=jm zPX}WC-vG-p0qXBJj zw`bkxyb&D$YDo&qg3+fP4QbamL?PDdAcxAtxPk)6b?~Rs_)7;lbVa1V$`Kpf! 
zpRX)J-!qTJ9VYxK5`q)nP;gC6u^l4o07~vAsHmgZK~^|iPenbO%Nn3#>9&+9|D4H@ z!djAFEXkK)Va4q{V4H<}`X*fv(|{SN^I7{Ahyh!N#p3PwZljTTFFA!Tuo9-(tIVWb z(T(#xJB?UjT>9F0?cS6sn)5os{%6$B#vvKmN5{$5-!_|lvci5Ge@J~digVom;GMu5 zx^_C>c=jD5oW?u3Z%z@LFDh;Sc=F)%$+{||7x=W-Up4z7ofBR5EM|5&!?$;!cHze; zTYC@*mWP1Uis6{!5<@X1gN+QSzo4+s?Zayt0jtuK5m^^MNc|_0CJ&}C_-)@vzDQOD zk)AYVj?2^vnDh4VlC_sqf=L?N)uUlRe6vn*u}@=a{%yOS(k8`C))AS2vY44`kG_b- z2Ju&mT_^42B|{B8ZatXUQb&tHsqE+Jmc?rZTW zv0oSKAS5l`&?50ire5|gb%#x|aa&^Qexl}K!~Dk2{$nY$Ag<=l@az01nuOZxgS8^w z_jpsg2SeI&@6tQCwl3zhnT#p1wDXG#=2#%xS^Km46zT%TM1(?_>(B1i zZkw_p`6|Y<*V5+2aZF7?4ZT~1+|v1^8>(7WXnIFMs}^Q+_|Q6ilXL>dyL(m~!@Aah zX%Inz@(;`850aCdX&&VQhYB0hrDM7(-rUn0*RwQstX=G?8KYt~bXKm##i9i&i}DSo zIm*?u+)35C5?q>53N5r1w&f0dqs(gB(by>Aj2=$f8U#~0_T1C0!l@h;^ubyi)7a`E zD6`UbVOsivWzT3ke~(05~fc7msx}XhR1OPEpvlnn8Y}@bP0(;Q6e1tPXUZE$USL#?A83(z)jC^`<1toFbY+sa*uQNA!b# zx%}!iBmQz(6&h?lVdTk@6-2-y&Z>;qFl8=aG9gyGc8)8V*Dbdx3bUv`07gr6?lrjn zL8+4W_ry+^JaPUc4Bv2!+RW{?MuUlgis+9H|3x0GXgK<)K*Z}NgF*}-luB7IJflN3 z{`{|Ae3@MFkG^#5RSUe8N^nEHYqn#Areh?Fb#|=YvXdhzn9`AfDLc~5pxTns(b(%p%&}0y_mCFG*R0w9XoZM33gm^t7zEDFhTOkcO zwc?64iM86kJW0&*)g?^iMQukNhHMnr9xNxJ*Yl`?X^M+qcGM1E%y+}7=nK1yZ$bwq zN+P`md-{5wauKSV#9??6gujzXI-{nx&DCy;csktZZEwZ11=32U$_(_d6E`rwfL1^{ zI74WHI@6XWqF@()DSg(%RH5rcj9116r`l+Sb+pgUwfSc>x>q4ZEo0gCC|dR8*6L2Pyb?R5FF>TV^PuUckAw9A847(2WKCNo~=`75L!G zP^=BvITK)w3CBx07i6+4XXXZ7PkC3GE4QehD9LlJlX^^UD6UD5wb)X;kz28sazUJd zh+f*{1I){IuZ-DN`QfjVydyXZKXYxO@{#)BswZTD*Sqggb=_n9+D$^H4+B#%+^pKW ztpql!xyP1jL>)}V)ql-9#&2N_KFo!!&!m6V9I%Zf2EX;Y9LfWeqsKuJqa;mStlUlu z_m~*hc&>1c(W08^jKAd$KeP2FqB>xC&*s}ss{AGhE>E%>e&!Rvyc#!p2xE=5_U&C_ zIZFxgy#|SOd~PqLuO?*Q5oE8#D+ci_S)*%zN{YVW?u#}U_qC%WgwYx&sk=!y@BX$w zt9~3iSgT=hw4v(z?EuN(d4NTwJ=8%*fEsOdZM{|Iv%YlY#6Hq|f@F{hK_E5LA%BJB zZ1GTT--)-&DXLaMjsybQbh*IpAL6EfM#s^z0O&UAX--G{Hfmb3W2T#rZB34{dT@nc zrz4TCq0x{~`lME0{*i z_aBJ2g`i0GZYIn#zDJ}d1tNv#ptn}SDaqC``AKz)k9HxgTqb%?v8WGE3iOTeN+{Zl zp1>9wvpMl|1t$>wG^9ZBlM*G(Ob7*KkpZ_-D@)`%BpsJSX 
zrz3r!$mmkvm+Y5F+99vs!Svr?2vRClxE+`;njQL%hd|02d?Ej&P@@nDr=o_88jyePTY%Tljj;!Uj5 zty!VWfBUV+`n2JfO)`yTm%TVeFex z+D0zOqqS*+vNhydG*w4_&e6{sCm(_xbR%0SXpSehYX$*EKS~F(Lr3As$)q zt)Go$Kc*t(Q&uN)XB|ai3F%M~Hr0?{oT3p^O*M+a@2}L%J9kWgoc0EaO&4?P7`bwD z1t$(~`?jxe0wSboUb^Y>rN_G~9ymlugA=Xi{skjUiiAEf@|n2Ce^v40J8fZjOF{!N zMim;&V=!KYC#l`t1Fn`pP7$t;*+mOR%Q;LmXFw+Y*eD6NXk6J&tPo z*kG=@;jEWDd`Gh(WXayK+gwX?BD7>-dsMLRZcAKPDq@RXV`5_qT`^>68%dQ2ekzM^ zwCVn=hR7fP30|@%iLY#I1(=*`Vh%Uji|d}Wr)lnZXR;3Tt4SPLxzAYr7M7LNw~?M9 zTD$MXdc!ZvqjZSt)`dX2QJLPZe%faj0&NY2iJIT8=X(8K@7R%r@vNzM!q`JC^D~I^HzoxBq@D=WJ`1#!rtok1-!P03AH4T%Hkyt)Fk%g~tyHC%B+1qW>Zb_DoA>KC4$27liYs4n&=>eQ?I5r8OIP$+SDG!16snYpWw91o|h$0d5~i1t9@ljsT>=0ykR;ke{z!|Mu$T z5u2zI@*ln4jCui=mya%Q!1rPQz0v>b$2t$uGJc;y5yE=A^@wP22kH2?wnlWc|C))F zI{cshX3O7NlfYXuG_^wzX?VzYrb)_%_F@V0}-UCKKx|fD;fmQ$Ctj(;1T%rWl=s>h?wlJOl-}t%+S%U3Z zI)P}sKE6SpLe`M}=la1vT<89gnnzXFf4vKv4DC39OxnZPy#my4wjO68)BlBJ|0!#J zG5;44`gg7Z+iDL%ai22K-}m)kl=Av0{(mGQ5a>+sR#PTj%l}pr1qq0Fo&Jx#cbTri z{hMTiV=sBC%OF*7fi<%b2GAv4>J1SHRr!^a?4V?6GCQGFPz=Ji3sW18vG9iIYmNpF-mHFKyqUt zFWbP&i@m3Rxcc8Z|4(B7OF02#1*6Xp(x*#A1AT}S^}oyPzb_*C9`dgGn0}=YC#wJ1 z=7nJF-6{A!y!$dDKD(mdoH=->_O0@(6jJ_K7<)*O&6SjczGM?mw3 zf5`vXehnadeH8jXO2og@;lC2^KgH``Y6UP$kTD>N^w`NHf6BZCZsr>`LQWpzA>BDw z=(k()Z(bjf!R2-kW6T@6C#oC9TPrztq)}H8EUiwx1g!wN6$+(Zyjh|PXsgD0=FE!) 
zu&v?KZqB=8a^A=C$W^E*z`bKld|AJGvSv9y9^=0jI$3&zIT}L~DcvSy5(Bx%OuY)_ zPrGT);Xq5k9=J^9!QVmdF#VdP^f7DG+(u!T3B!83UIfB#Cb*w4^CTHGH}IQn=WUjZ zvGu!TTjR$nI?Xmq4iOu$~>>7UP`D}>iBS!Qq z1Z+%p@y=id5M+UBvRMnU@Xxh&<(<>#c^=6W7Rm0h*eXm;_Tx2FgzpIKc&b_P)_-4C z`%j~<&)E24KOz#Qk|`S2Gah1&<`$10JEdPVPHo7O#*dqG*!HzTR zXjRcYh_wptsZAZcp~-&NTIcW}vWU1ipQgYvF(Ff7;%|y27p*lDF^ez3T(<4_bEu_= zAb1I0Zfd;6xc>VNOLL?w{8?x`N_sxwl20uwK3iYuqv-PHtG|2phT}{}L($!cd?Wp9 zG4fyRIQ(J{U(Z!GdLNVdu`PR@WP2Z~8lE(wTU6rb(}9XkK3d!WJ-3+WTI8^je41Yu+0rZ9Zs0%LU!TKS4qQjMlhS4k5y}qbjfgf%%SSP zR$?y@)>Eff{e?;W$SO&2q~t(r?2f4Lj_J-%G)0OJdTn$Q=-x{Vrg2_#l8yG}V%gRh zOGB2Yw=t8(%a6|@oj2P8Z7@#wMt;uZ&uruuh!|W@q6uCydhRfB*APyt-IZ{%>1oO8 z>FGSM(;2|~i`kJXPPRCv&&=~3)>vwaREkhnhIGL5=;Bkh)%3R3#!XpJeQBts{C6rT zb30ErmWEbZ9yoF$DyaNNaxBwZ(kx>t5mY!dALu1RVTv$msFj}TGoEVM8KqtL ztJ~NOUJ~95awkPpKQ;-7H$#C~-aIwYnZn(?1UW;Z@y;!7*iD=MRnUvJvN7~_Xut8( zxV2%5mN(j1qGiA#ytLN*ZLx;>H_N;JOK1h4imWJJZ^q&>YPww-W8}Un&c}%oI7V}J zd&Aj4#b3N7syH{=XKlKj=%bt4;f`MA`(6+CnBj;UC%kokTIaw9F*{XeuE#n~~0sB^HEYrfc6azp33F}*&0 zzdRgxo||Ogi!8o}IoVhK8k!ti7^dnLXuJW38-L8o-5WnEOuNN)!^uNZ`Qtczm+5Z3 z@8YnV;-iqva^d=zMmN4Mu{)`{hD!B4v&(5ayb^u33Y67!nVd?jU z0uD|=0Eoha&qeiMA%nx~uN^$!{=~Og-*xO2E=KQ!vofCAb`zTw4$Nge&1P)W=_uA^ z?}==48Qq$G&J^hDCY}?r#KlqI(Y(`vo!wU+4kZIAVmF=+(`%P6(bf|(XC=8CYj$3G z1H!+tNqoA~ykph?d2#9dGA)op>Qf~d=@A|Q%)zlrcyc{qD=asgU*(zv3g4ca3U+DbUt^K@Bk@RviB0f#M{;7Nsfxg?@SMq< z@+61IhLmDjA^AF&qb|`CNO{Z9qF~3b_Jv_T7R+8{Ok!mcD#e`tQt)&ICCBN%+L;jR z{~M1=ZPcf2@+R}e^8n4#wegK$li?T5R)<`R9{-TL7uTu|8jXb~#j3aCviq3!c7BnM zU%z9WD@JDX=A{2(Aeak72ZgbH(E`&5h zvA;YkezZGW^pr)-uvD*D6NROzu9pj(`5FmW)gaU2wU1}e!DR&TLk!THd4#{?$X&HE zqb@#}w}-85U4MLq(c7pO1OHZBLB=Z<G}}%lIqd&Qgv+7Xbr1 z28N%#;)cJ&HaN8PM5@dO_0O>`$U(Esh4M_`N3&x+dU4GPn=*|aMw73s1qaeV0_%KSZ(Lvmo`YgWa=cuK&U-wn9^ zT!rQ^pF*~JLY|*cGLMR$*j{p7+gmVW4dvhb*NKNG@Q47~CNtF{XZW1jJ1+J&;!r}^ zgU{BV6D@+p1xRwS$*1Q^p+vYso*ddg&Ex(&1$-qv9*+QD$#*k5a7_|oq=yXuhFPmH z#Nghb%^>2zAdeJVXn9k$y-(_E)j+$@EcRTw3S{UHcMqkH?vfA~P%^V-r79~~(zvZ+ 
zVZ|F7rfJiOnpX4vDmAJ*GGc3(U5+06Tbs6DR^%_Pzm7@LjS<;Yvze5EnOVLf_z^K& z60{{inE(2IX#oM@K}Ljxd9mrOzKYIKPgDIg`w+^u8u6UBC6igmWqt=R?lrC|beaaD za;1d~<^T|SO6XR7^80eY7ihm9MYlU9au<*x<_2fx#8HH;)HZPS7AmV9e(3@l=2pnI z`C)C~^?LTm(}rfq34X+Up$2=kw8ZTJh`+`{$iK;H#@-|v$)cHu7;OCy=H4 z4G>811PB%+!GpU?g1ZKHcXuba26qbtC%9|l?(XjHPS4xPxAr>UzI*Mp&pG$c{n^jc z-E+(uqiWO`RrMC8CpjQ(r5v8W}pK_7ZrWRx(1ifWqZs@j48X}8*5r&am5vNqXd=v?@!a|L#f^GOJo#5?4B0)#pYIYh7l^4 zjaL;{j<4P`wP`p%zfxSCVbfz``d)OcGyG7-$391Q0$kmUgRQA_mlcCfRvW@TK-`@v z#^^|$897J`;`x6sn?G@r%iw}~if?oi$fO^!92rOIeJDYcdS)$tpA!vL$_ zmI;hDp7VX4v&HDv#%Um?%S{{7Q8UMx~ex< zWfzC;jXIA3LBm5p{LYKXU~$VICr{qqvfRKv;nmrbG1Zsw&=Gyaa{QH~p&bqROApg! zR4)5MArAQ1Phn*|rQDSTygz~kKC6oHceg7%4xH6r*XtJzGTTr^W)qa;WFBSj zcl1b?=e@NIi#Nr40Jo^oisp0ehv14#LfW?t4+0km<=`if2{64Z0E{leYsak#g{YJ6 zVd5ZD%a0&uHlM0Rs-G$hzPM%}&w*^^IH{%IpxQ3ZR}Ya<8oMt6FT#65b@+Sy_+aBr z#-9$}piVm)bl4Wcs0Jf8+Hd=GhIR}4p}(Z(;=E0#P9In_)aCYdzvbTCOu}Kdj~Z^! z&etpGvzeU*M$r02d6wsig=8%38ZsoTgeY5)^wGdi?L4A(7wl(rnZ4)BZKSkh95~P) zAyKZOlWVe%)RBqgE?+Hjp662-kv3%{E%6OeGRF$-$gc)@x9wIniZf_>TjdyC z703pu!CF3!s5N^1taxkWGB{+tgSO48HbTltcken7&5?c@e*|r$HUL#ftmU_Z4CbYsxaPrv#@=Zf+xD2 zZ76NRMbBZAb>H9!mAvww2wEg`2T?E%T_g;bfz?s4U zY{Q+#e9J{gZ*JK(?6P>?>js;aJ5%yxA$&)7-p6@VaMG-Aqt*DiLdtxGdAcj+65m0zgit%YF zLuz&IhVHpH#rtkY#!0lWnp3<&O|d(tApC6bU<@Yj6XLafThC;;QRDNC)$1IZ?z{j< zyT96z#C?O*|HFJ}Y3jPQFYO)bu0ouY^eF|7%GaCFqxE0pSB)&1SCf=cr3Z? zp<6(|Po;l3R@Rs!ZWj|C3$%8IyZRLQv38{;qi14Z@( z_3`@+(_WOvdQBK=tCG@yhRL}`TS}OZZ@He64O7J|*0uGhQm){t$JPV6QL!HgD|#p|}#KeZuMk zRn8`U74*qbX#N=Ni96_60@3^gFhlGU#`LMV@pku{xJO+9R;3GA!h{6)n6DmBke)Hy za9a%(Kte*`#)#)u0)cRMgxnC1hEJ<%StH;w?Ph=n+u^qL=5qBKqj)&ZG(c7#sQ66x z`LS_SUwf7orPrOgp2{${q8;CW3JL9??&p~0q$`?T(1XpD{*W`|aG+3nE~~YwJU8`V zoeZm!{&ec(-hI9L@xgmlx*qjD(ibG&OnZe~{h6uE24~7`X`l&8d>JtD{v5XSLibL( zGv$KMPGlJCrQ?NnnR+#Vhv#4mSW{({|CIgqz;fFwnN0YtTc#12Jl6yErHjH!R_j9u zzcm3%!37>V&ctX%gndw81i>sC$nJe1V+ePE>P}XhLg}E85Zb-RQsO;T#;aUi&HK^! 
zuHe`#VKC~Ao8dhu=)#*Sivq#Lpb@aBTZ}LOCs)$Tzi^MclOP52cWzPs+*;ClP8!7j zkPK>g(_y{S{+18HirSD`@jcY9!thn?VOMC|EobH=lf~fLB*(y<+2hksg`7VL1GjX% zy~|N29&@pV_^4g5GDV_Cji^NnGHHw{iEwwYEghFoz0HVUc8=~-;l?+T(DEp2xe0R@ zsac{G><8I}^HFP%xMK&D))M<;6KtYJ6dMvXyR+{iwNA;bMDRG&<@IRp@Q1A~N>{pY zt;~QKJ5wIC59Zv6Z}^^R7l0)Nm4kegVehf0U3X^S-s_jw*xW7`YHA8lJQP?>CTkAZ zt8fFbi9gr@>yN)2z^RfLX}`iXIt06;Y5+m_7-u2DjtW?P`V8RG2(rA?6N+O~nVw0iI|Mlc;f?$sWO}^Cap&Ef z(NEPDnbCI_q$}xlVhsD$3z#2Ad5_H@?xr=ncy-Wlg?9ck&*OaKEV4|lO4alU#KGYP zKIqD5C~ncezdJ>Cd+a`6U0f7+gt+GTb}6+?9P$JUC0I~01*i5gk~pr)-_OBse|-u} zAL)|FyNo{euNtwZGm?fdq<&v|7m*KWGNR~lI}$Ny;OyeLp*ju|Hg)NJ;f)IXo>_+P-QD zqp2pEZpv`34Syy_YvJiuUfsl|hj3kN@fV41C{CV?(swI?r#byf~o)2eAGTk1x zJ6qfRr^^1VKJ+uj$0+xEfkyND_vW^A&PDM zY>a{$$2ropsU5k}>?yNgd3`c*CWkt+tJPy3b*o>MuZr0$j^J@BBS(~k@x#~KNI%QJ zNNyK47PjHcmjs2&{scDGzHj_{Z0@Xdo+Lup&FhItp=;m%QmK_deWO1Jv=Rm6B#jh_d+%iW;-h?3ja~Zso2VM>TCY(){}jloLqy**7N7+@=M8C>$MaeSO*TnHOH8tx;`eQFdrTHJ$q-_1dM1 zmwohZlt$ZL1I3sStxnNwP3(JHU1|peCK$6qnwh~UsMKccRFHdGIjY?2SceZ}>OKOW z%_gJ5Fu8b%CUkgnmk(aW$hIija=dn+wbN~UP!gi3fTp!jRTdW@vfk0-(8X^kB~M9x z01u-JgtD)P7g`C!8^go*7E)Q$!xOFJwPHkucc2WUgMi;`Q6 zulmw|WWo*UW77NvMh=&eRe|o&kmXQ8g4u*WM}xMLbFx=f62uU;pS!H5hmXYK9Bl3V z=+0Oc*`)(*DO9UR2{ogL!9~o6XUYO6+?3rgv?Li6z9IYTO|LHZ&;HUPAfPB~hK1%3 zONLD(>6%lOBP>%o+{)^pP$rnRDOOpV!(Jz?nAPL~bAR3>$8n3}LEKIc|lr%WOrC zrv=9WH|*i|vxv??l>wI$uLe2c_xchQf_vj%Ca#$Vg_2QKOQ3~*L=CWmm+BJ|Az3NR zF+ET>!eZr+n^hKO+h-53j0wZTyyg$jA<3vBr`Hj>j&ML}=G)#hGx%oEA3?)`{-R7& z6&CHfpcRkMJsim72(u2{TJOEI&%(&a@x@j^t=lG56P*Zm6eO`NUU`Kn01 zX9GX&MCB-hp-i5~YAQ2+L1_2HHl%9UHy3jYV#92 z_x-Pw>XN{nVNLSLw6O(Cdf84%0xhSxnx(FO%O|oIAuv{#ZWxRv-AKs+~oJ*~< zxLNC(H^Ilnnv-3SW1(OXNs*t6p$TKD;aki=Id&^LeSZGg*oHgocq)30`?Fm?rCekm zbyu31FVmh=cUQx%Y9y9E%S%z)g)OONwdCNHF{o1Y^h_f9n>%5cy6~Hbr zBEkm0DJ5cK=vNZ|gg&jbD#fJf{_VXMB)USjy|&p~=I!pz=<$@4R`F7pI=+}zRxRwO zau^i>8vK;fdj!}lCx-`qI`@nMe6AL~9fzJi7Xo&>h;>pn{zeP0(J?ZSw6g{##W2{4 zzF__lo(dD`p`+103dy{3_VIfKXTPJebB`@`)QA!97j$Hp{S|v3(IlIoQGU#T%N)Q= zdq085V}IXH#2~UMRPtJZ*e>Jp!;=PD#K0U-%Yn9R=e6(vz 
zNA6B}{ya+uaZly!i!U`WiwQTljUv08mg61)!g3E4#fBYR?IZr6x{|rv^pGEjx^+E`tm< z#y@04HlLNDp9i>@n^nhvjdJH$PTbeI&nTM2LGN8Xg!OkPVhFJ!T&U737mMSs1`0M{ znj$ZNK}7RDZ~dUAc`maNpsA(BOAdT`d-miFaa_2Sn^X^MKF}RsNM)+Ma7~lFDEp~Q zw`h0CqDwnB8vJ@Rd2=!;nhMvQKYnWPIM}XXO`5-9HbpIVA%W$*GkS?JI$Qf-TYj$? zsa5W)``l7F$tyyorWB*hQ1Hl5*e1P^m3x*TF-aCW1G>)XO@r+JOWs)6JZ6!$3io-0V_QZVfA&v63qZ5fcdL~S~JxU|@JqpF#Zd4h8DG3-b54A-Hkc<5~OvPym0 zMb_vI@7mWPRPfh`rOoU}Ik2zOYVR_z(W0DE;>qn;ggIOxH&5FhU27kX$kNc+NM@P) z;=5cd2nr@8`B!Q9Q^O#b|2)Appu85^bv{xOMLgqB@tNGK`i?8qik8$(a=d!1^&)hoO$ky9T z#$N{#!rN-mKE(n6e1-{UwRMp-!?XXo!1vC5>WsR-oiH!ESL$*4pX z=e#@KE~aKiey<`xWS~kh9-qw5l5n66!PpPz>WksgBuB!-gg+K>Yy8e%S8`Ze(u!`E zDMGF`#2@$zAN$nqz2JN6aev8xAAP3XlqmLM>@xMcdCL;VG2GDv@144wwWui>j5I{U zK7}Rc%Csm}okpRvHzv5#92jV%>&XaYM16LPtsTh7rKy6mk0q{8jzV&wRoH)%67~JA z3Oz_y@A&Am?~9vVnV!TZ6-OTXndZH zO)|?3X+k678m}AlwBhNa<~m8yiwaUmVg1Sx+)`l$Y{40I$wWAx`Vap zB8_B{bIpJ@@4!l=N&^=+C_ZDE*V|L6Jn&Oaji_yGf8)18uOm?wI95tU&pNTulSLT! z(S?s`TjDq4$2FzG_P3eiSvweM}zdCf2HU5-4Y8Bx)?# zy}XV>yjEQwC)nFnt-nW~4EM>gpeJdHu*%EpqJ0ydt}7nK`pc9?R?TXUex@Btoo(Ah z*S6a6;zv1#(5c(@N@4SgzyOp8avc-s%9r7G-}Q384ZxYyrH>`PGj1XPy;SC?ZH8Z~ z)g14{@qXZo)Z=Wr?xb(TZ>o#Tt9!aJoAne~^lk+qml_=bVC`EF^tntm71&cYx;_b2 z7kSIqRRVjJ`?%|}O+^GtB1@2Z4$(X$9dmOwiagIVnl|P{o^SX{@uzX+X-K||DIgDuR_NDp z>E8j6XiUr~3VTbrp(7E++G>-+I^dTJ;y@cRX`kq4ua+lMk+1D2_fqM+Ul0v0>uhyM z0yDus^y!Y{?B3D{CAjWTyS=846^hY2a5ni>X~k?|v)w>o*)(F1P@?SoLu@NsxqEL* z56S%|gU5%uUkk=nmX4v)sCi*eGgmTaxv`{)McI|UsB*5w8G%RKDerM*y{(-3R|Sx` z4DI#2xERaCbst6TEi!yB1A))TUI8{P!M`g6+*p^E-*k+Y>wN@)T*V;WCP=Xh3@PX6 z%)<;F%~e9)NvoAj3oDzi*LY{cZXB*=WTs_;Z+4C+Uqx2K4*8QFeD`ZcPi?YqvSDM4 zprc!iaS{cu>Ao2^c&NS}-+61_)|v!5Y^}RiTFOt*qBsvM>vy-Cqlw$vSke=homy)q z)c*~=?FTwEdLijOQsTJqd6s0?c5XTKzHEVHu^6m;wcc4S`&v`?^D-NhnV7X)Sz6<) zG`(jNhpr$asY|M$>atn0?jTjFjP&OF-muNkK>w}NrSQWo+Swaaz5}w`h==MH9ikMC z>Q*dIHx;wHUn-rXBLYQ_{EgkqI_!|w$FqvZphS&(Kt_VjtTw^mu>@CpoP}_rd#OP( zZEI{ywv`@xbj+S%N<9mTj;~RVOP6chI|<#FOIeYEh>->SYpMAi0!-|8%yFCE7sS14 z9NA{*Og9O1P60;qhay#mj!D# zstKG0S%(n345elwpUAodGfuewAB$S 
zv(g2&7m}*x--ShXstW20tMOeQt!OnZQm*-cc?lT|rSySeiK)z}k%{Mghl{riiNZx@ zj^kZh1F6XCb3)W|-Ah*vWN_6?h62zvDERAJ0x!v(17PsqIMn8D7}m%$fvT(a#=~r~h*$4=l`;x{dnf##m zTW}wfoS4k=$d!F?*){+HyEB7LqsecRW7(ERnjj+Q-NEIoF;S^(qd!rLxjJ3XLus)j z$uw3@avb-D_!hK4W_-4469hTeU0h}QN(?`1I&(89vnz@@agvQ$<;HA+GK=KgZIl|c@xf!t!&=^6n;%^NJTD; z6pqJ&WJq1mswf;u*`3|0=VW+ahq+KkViZm1F!_(vfRw9&_;L#CYbes;^dDuiMU-sn z?O`a3L~#}T2?sY-)P))i^4MNEYhll!u5P4L^N$vTfe*UFS6 z%pR}?3_{CVtUcscY|zUT@YMVBUO?|qzK?si%U?rbsw_7kuD&B&Qf;oU_|>p!z*UV) zgq3P$-jixp`%C@g!)UGrCS3E#TbGFEU9CFxE5Ln>OYjjW@#i*W+Q1oH=U_fjZJ%@w zkm(9cRU_37E8@z(gW$mB;_6~AWYB_$<}Qj6^3y+H#B2%5ah zupI#{8^iL$Ag9QBVRq*>-Z?{yz^^iJo+V|6=HYeFuQkJxer8LtpYhaM+p&fv2Nb*V zoCRLGYLfg8haIb=5m~10v<~~K@~n24wyLG#WUh+RRhOeX=2brgQWVt~e~PZE>GJAY z07|?n1-zRBKW&Mbc+zQ;5s_pO#sK+)q<9)VwJZ7MS- zy%r9Yoj#;%mupF@mLpn-UPitXL1wtn4fnd=NLkQrkDu>rL@Z8GoiS+4rnI~XllpP9 z-DpEGM#Gk(rVVlDqG)P-5S()_cD}eXAA;l9QUYRX5mqKppMU4q zF*lF_VvJ~~Of4oUxtQUcMjK|H8}FIk&IuzbCEB)&Yqp$E7tCo~P{jy+H?M--TWvsv zT~U&7ittH;R5T3ELvgf=If?#a?FruXMKVceelh992!>rUe`92Mc}iu$e&Y6>>N|t+ zYiC4+ObO=VBOzCY)SpSY-7<8Az;r8XfM-9R)pTBw%=%^BB&B9et29#Q@Vi63^VrOM z6x`a1YIMy8-C{x}@reQ?^KGyD*=bLgZ#xNKwHl4$qLLJFvkT<|uCO>$h*IHLLfzH2 zb;BV`Aghi?W~9f%MgwkTK#|PpfWhI`6|H20?&KVz@^{9Fj%z6W2yItr3#{3BQbXRB zZ_zEpO>w&0jyFc;3rn7_ukEv{_eb9~48EIfo-Hq^ZqUCSfKFB6?u7oqX>AnVc+|hH zP*G1yVO%KWU20D$#Eey^CZ+z+j8oIVTKgGY{ejH9zg$hxwy|nrO0b!lO6n%aJEA-AQ=EiU;f0(#sR24zqL}9;s}xkcmg)d!lKcdac8F zMSqs+P_o>Z$nCH=O$Sww&aLOmZ|ek|=_A0zVxKbyaOi|OxaX-Zxxqesop`C9ginx>b>RnLTuJ7eIuj&{Lxm?J?>d%ISK zjTCgs44xKnEd6nC;*ETR8g{c9)aSh@O_5HWrxoO|X(rMI`z^{5dl6nA)9wsbnM9y9 zrlM6))s_w2_kzK>VJFlov53t$g}Z)B2aqT?>}!rukuMNCkH<1wsEU(SNQ~1Ircy8RDm$xe=0)r{pAqwy75%aoQKIf7tIMQWGDbajm%sA}IN9`G;d!Avb(STcQoR)zxjE?AOC2-jAcZ3MaryGc>lnBMMm$Of=<&>% zDMVJrLMng$)UiGo5mOJ*6&?dZG|b?Ov!GL&OT5hHKQSy7s8bz+*yvIf?hS!AW~T%r z9D_XYJA-!d6-uUTpB7j-z#>{}2%hB>aArN;WwUfk0G!&P!+`-HEK*g7@;0RP#D(E8;#;m~;M+xY6|rGy*K_?mCi7-p*4uJy%F)*Eom 
zR)8^x0(g8}=7rSAkXp!tsel56(OeR+>7JRNwrR-o@iFW^O%laTDR-un+8bxn_!lhNOc{MAQEmly>AKH)Uq}a{P_Q=!DzGG)itzuIqZe zANz6PY`otDg)jXzm~0w;Vy|^c#Gx)SAhHu{eb&GvRp?VW@L@suxmj7Z%F5w(j|Psz z!bqn#8W@6la-^G zzgpn*;r`t_Wg*5yvuKVI=0>&bgYCEVQ7&Cz&Gh>94BJF*gbsgh-@;l9buO``Wzr_s z!|?!T&#OqI?-xBuaG1G8SNo$~^<65hpT2UFk7hcTc-PllnH;u??CzqbMgaqvy&a}M zoPg7E7qIu-YUeX#YbQMf#d8-^r;1z!PB%{WcNEXPXTfhw#a3PHm6e98DOytGj~fEy zXr*_c3a4FeCQIsE(5M-V4$!H1V{((4h{6ffTFc7=pX&05GR1uOtDL9OfsYI zogX6lKjMD$jun@G!hYWx>{d4+_1)0Q6&)`!FjR@&!i=U3)gJ8{?;49?GePpggQ`@q zqrCAl-_C|L7|Jx~zyJeB>Nk6%^)SEzU0&?>I}z_KX-iX^Skx%yq{35M+OU7Mqt<*7tarPfk|$T5%FxX1kjGp|Z9#cL-UeAjEzrwx1@sbFI!GM(>2I>+T$r%l%T! zL<3o!ipSp+$t2 ziaK=uc-8TQAZo>ZBJt+&%8^lQ;?s#$@#I7#?sF(G2Uav^)7Vbv^XH{8sFfIQD85AO zhTm1_@Gdb!@2o`Zs$b2(KwDpb9dfpo*2=(3PxryPyp7M*qVT3pHv=UVUH$T^>xyt2 zfErlgTj=*(i6_VwLdLEsvXfV@pw#kfeC9dBoju_$DAoSi<76P2crQu4v@p({fnhE= z1BLXZ|6xlo=hH*;y2u>7u^hG!q1b@yp3CPN|BCc`ngIFsruzNri4jG$l;Z_634PW= z_O|7a_QpR^wieOz^U&qpy=jW<61XoQ;Nb@hxT-vEPam^2c}x|71S=#!aHW` zFor~YRTZ+cQ+JoX{l45TU(kZ^-)QbeW#W@dl%-21W4u3po0E@t02`~B+KdcujO6y2 z^M(JM5(W4C1|-0?%alk_OYO~H`XV{XDIh4VnW=3ZQgJ{g;&B^d66!*blYtQ2HpGhaOhD;}c9ZrTES-zDLDts`IMOjhD?c`93!XETHt9iZwTkAz!KvMV@j}LjBLC0 z$|DtjHIr}8%M;Ow%1`7`|7||P$V*;`8Xx9EaYmGXZEmF@3b_OU98Nv<;lR+OjyYP+ z{vDhD^}uNX4)ALExuxUo^cr$~?hLs;rB+eW>C2eB#=Xapqs0u+itoBKQ?%)md46CfTx1ee-n z5T+|sS_rQo^S0>IXa+ZGx&)@gf>EPykp@0)Y$HKydNkp8&Tu*BE)-N0lpD8KeBil5 z;7%WO$gVH`3EyEK^>L(qJ?VMu(BO2zb>19wbR6c}f`ZOUvZ6wR^U3zf=ETgr_%1{@ z5vIUK@ha^{!+3Z4uq#nS=R_~T`SOxqM~!Mn3WyGazM+n>o}(qOm$H6Fn35qH zwcU&D%ZHjoZC5YYq#t4F!ec}m5Eq-Ae`oz}9(qc&EyogH8CyURZbB}1lmpXO%b@-s z>pOanUi{kXi;RUSE*goLkg4XRy8$@^(kxLZ-G24*BY6qpd0qm@`MJ?2rea7@kqn{Q zJhvh!Cmx)h2m4MZ96_!F;}ezqZ1*b!IZ5IjwF76@trb4JClkzwccnW6U%$?{q&RVA zhj&f5XJC$!GLYE3V(#r!E}^G>CMucw+FqKUk^P*jP)x9zZTd5ce4Y+RY*w7E4;97B zA45C5pL>nkmFOeunS;1%jOP9=8=V4U))7t-h5)kDy3W0hE0X40 zoJVaW+|&JfOGY$FQq|+wR@j3q@OxsE&dH-r2(DNCJ9nF=DNy<7BY&0`gk1>ysXJgTt1;k~WVeapCelv}`i4ma7u!uBR> zK1ahxH|L=}UZlUwdNm_MRS@3A@Z~-C*G1$a{{a_8VKj~jMzX;<05vj?N6|P(Y?>ow 
z`fFFJPg*YRO*iz%=o1_9uq+Tz2oOnb_p~{1n4UM!l}V+ zm$a$h(`l~)M+hfiu-FF;)O|UNz~>bc?G+}EXlr`>vWZ^Y@O6COScD$60Ce%zxyJQc@bZF}mppU#ARfRaOPI+BZYPX%6o<5}w=%2HvN;J(`MEK3K&Nw`32w_4?T} zdWaM3r+&-1R(juBRK~7Lph3&@bXx$+2*2sOlivE-dOIg`z5HuY)Rx>nM+T3I*YJr| zd^05v#U^%TVGX-ga#sp(zSCQ9$1%tzRhy#|R8ND>kX$Cg^;0~7Ynb?B0_A`LeJ%}B zkbYqR-GM@7#-*fY`t8^0M?~xV_;eP_28nOBK2HsbC$>!yT#Z9<)>l`9gf2}TC{2xc zwKs)I?-K5S&|v| zcc7hke2AtZ|5EpdbBUMtr94LIwA^9=%}>EoHS~n)mDF3E#p4@e+so<}veh)c{2td< zKOircMXvcmDT`Z)dA^bDBEcwEO!#EMq1)G@uY0H^(L8xDX}URPj}D<#_&_7TVy>k( z5Z`&Jn^a_eJp2a7UIeEYHDHSU*M+h^XX3X_*&T3Et9;Cd?EWhZE8Z`&{nab8;SGW3 zes&qL84;#U2US&x7qXHW28H?^+N-TRB`P&aJ54^}#aB8p&yK4P@PVW6PfCH=#h0|R zfV8pX((7C=<7rYN4>TZBO$d%D4tuBkO7RMeL8qcS@`cd}P}}p;6k~Y}crb@t z$gGx0p-{ds>)yRCDa6Arq4~zulb4z-3+0g@0@R5$@qp{zW4ApmS(ke+Wb}d|F$L+> zCrAz?C-6Kx=z&oa40qKUe7%@jcmMjw>)HT%SGAct2Pi2Rj`G>*nUrLnxnR@u$_}(p zGM&7MB5@n|BpJnG(?AhBl}0D`5!J<6zL}5PloKPxGg6z0!@Z&R6ATMa6JZO=Z;X$R zrjIj|=-?ikYGt=Fok~rXLUzB1e0;vB3q%A*bDpg&s8Sc4n_79?Pfnspq@nEcQkYb3 z;(9Ns4 zz!5t*z$WBHhUM!ht=Y9;OQ&~|m>WM3O_=sxTh21+K9V(qmZE2{KkcuS;LI`)tF!kT z%NQ*Plr>eyS~U{2v`Ku&l3tONoTN-~0w-MP`l8%VHQ5Lfy*Z25q1B z(KMX_*RKw{QwfW#a}Unx^VBi6q5G#M&EmMwf}D@8XXkfY!;6>NTFr564Sey$zVX9; zk=}gwvlAxrQ(rridqU32G8n4m8D-Z&cvL7Ezq?X-38n%*Te6=K6T7MGxKB9J5<_37Tv=9NFKzR@tlA-$_1iVY6 zSyZ+~R)8|OYz9AIi^c#XxmhNkRcoPBb1wdTw4<1dXWh|Ql#P4RUtdH-o?8B;$h3CU zQq7Ss*UUo2(5vCQp`sZm=j$+ys>*edRQ60wxmwhcCiSmSw|V6)q*E>==V^eo%^UCN zH~;RoN%IP5jW5S}F4FW~LPSQI~_Gv6^Stz^YFR$a7$7X>RKWfnO;R zv82W!)8rxVYl-CXf6N3H5UteG1RG$Q9S04Jn=v-}p3CH3EcD%wdsn+95(_2=;dT-- zO&l2U$+;N=MB{COy#yvF+M0Fl&b4Mas*&TRXGd8 zs^5)Q+tBK+W0r4+MqY?BOxMq?URZ77U-6NaPQ^-KwBtUP@y$^!coPN#vcMvL0bH}` zY7{BcS%={s?19cqvITj#>T9Re^z6M!<^K1gZ7E@MnP1Ro>#160Ne(7?JcnNi$_p1W zYcjI30czdZjM)t7xszZH=RC=;+y9z(N2frk#f6onsT4hYz(+;ZyB}z>ov`QIGhZL@ zytA`Tw~^P;k<323ZAP{M$J)#5Gu7T?U@x%Qb6K7IO&F5MTH`UubiE5NwF|^&c?&}* zHcgLykOnTZz0aA)FSmf{3*d_MtMC;H;oOuD8Q+HanG2J#>n%@*NNZdC`ZtQDvoJ9 zg0;vLBUWggqmgJYXa7H=!u4H>{J*GhNG~_72a5F#8MYV){nbp!qnd=gfnMtuo9mad z<|)L=tiABvd5lIqnez_EE 
zKKg{y=Ep_-W&`tx1^jM=X-ojT^WGrDOk|WJwDL~Qs>|nuPqPn{hdR?Y!Sm=` zOf$_oA)8t`rtaCPQl>|hJ`Ihr$F!rU4pO4;?F97URL5;3iO~z`p5y_BG>{<0?IS5#sI{xV;!|D2Zo>Ta9Py)#jWxn<2f z@p|A(%biD-CYt6HhH`k4O$PQ%=!8eAc{LpM_kN*!-1Y8K!xomeNZ*RLzpLE0B{?xg zizsBVVRkAxraMdQGsF`+?4vgc?xQ(<2lDAjtnD|6qHXXi7-NaYaxU6wUr4A7IVy!^ z4}M@0Z{OOvX|lV_lVzdshK$3LtrZ|A&vI~0^DVu z0CE2)p790mGB>lih$`h<)Pt#CaR*33X4pv%_z@BPUmSOcS2szK_|gw!YU-aq@`qfy^KfVpE^nJlP*|x#zX#%l$4s#y1zTp&yO%{TlVosHub`8 zr3E2Pp}cW(Xy?a+i|nDZLK}aJF^^!WqN7pDe%|R<5+z~3{4@<3q|aHiJxY|a9CoDK z<@no>#Zi#TD}RLb%`n=$5fjxTGqz#Ls81Q&#%_|$K7i)UaSL#1hn=%{XBWNr1+zjL znxi<*sw5{0-_IjG+xRN|V`*`38o>{A|2&x+6{U%l2XCa+6K;8L-J>}2Ca7G(4ambEHV z)j7N}`=O572qL*lnr=L1vnmh|B@cpg_w-XK!aZHkUAkpF)ha=Gp1t*{b`iKbVJq zNkjT4`tA0Vm{|i@g#}t62(_`^Yv5m@yZ*qbTE7C?_&_Tr(9#t20Q|*s`ioALmFDmp z*VYn;_lLcv%d|f7;0=@7h6@lv2wt@R-!N0x5X{t?g^#y(5a2Z?0`B~;=IZPQ=ynY3P6hRMT9{{G;N^eF?l<=J4H$OVgpoFYvw)P5JX;iUC^fYe;Vc=ng8?O|BM0b>CZCr=QlIJI|#20LW*0I z{_mJ#?+*cF)icKj?+}1@4+Z7b|3nL8EduDu;eVY2a_!!HOI%0UV9;Omy#G7~g8%&S zUyKq$@Uo`0*m)8`nG&MD!+k7+(D=rVuTdby*6;s54}UExZ(Tn~7vV@9#|Q0Xz-#XG zBVhcO7|P~Yzx)lC=Dpbzkva|}Km_)O?>BbnIR0NJmSqQsS3P67{3Ft!|KPkmhHiQr zVtPQRw+LW&Z|{G}G5gop!@s{QXjrxhC~Idlw^?;8fYAD|F(CNAf6l7YJ%rZRXnfLy zLKP0gH=J@pLNbu2oxZt0{R=kt->)KwZvH9K-)V$@Ux-@8 z0NYmUA*AFk@$U554`vHw{TG+;w*mMU6gbvwV0P~1%pFhwq0kXRjsRT$WfF)19^t=< z`tL9Oci!5nM*SiL+V^9y1T?b-x<$SD4OIK1Jb(WM2viRBTsA|jk`7kh0iB)_1Ll7{ z@n1}+^CXa950U0LFaWV}>jV(v_6Gs?ACs|atfKo5!94wDRN`-dNBv3|~UhKNQxS|1X6FuRgj%vJ4jfp|J6t`{0jf z#oqzP55GZ-zQ79K(&7IW(NA~!8sL0$mva4kNrGm(|BdBckp!$!zLiY^gHN}eelY(N zVskDH44V4~V)J?J405ycU+H|1*X;fG8^d3MNJV_l4n+TSfPdNA11|Ojyy|BU*Z&*J zX5JOZ@E=C_SqBvM`#AXr+ZSqy{x_QEBU|fYJ!BzZLTmp7;_pxWPS*Yf`%lFYto;gF zn+bq$OCN*&X^hJl!L#4mW|x3pkagAsk;MNcVDsV`i0!hS5aeTPWzY2)~yOX{_ik0f8$fXK>qoU&aMz1tv~$t23)%U zU=x9SHyx}E(TUp!7r^;%#vUXq&G~yv)e=Yfy-)R)C)w*Wzx|IDzP5j( zrLCfXHu`Tjss)l+{y#ZLv=812e;g#mr`r{R|8E{r4FrN)ZxQ-?gstAePeK3HM*%+v zTwjBB{wOD8-LykJ{P8#-Q$kfM^>?!A>Ydf5+QX{Z-L2!@>Eq+Sz@#t3Tg1N;GT1V(`w@su`fmD}j}K_6TRn`wI}Q5HGCn)L 
zSXBgu)jhB5?`{R$M^{ESR|7Dva02{BNsvgrc2?~%SNmEl5;a^sq&-h%g4c(27}h@3 zR%mgmPuk?wSAJM9P&)D**5WK_52GPx279*Z|^$%ncl(C-y6orOE+2zm+ZjhXKtB#uu~c9FraH|Jd1 z!URqxsocev`SJ;xiuxwJ$yc_>P=@PVz}m6&JMf65g>*{CR!*7pw|>y)hhAC79wkoi z2%2S|2g{#d-F$1gWl6hM3UIpz8t#Edzb_DU%=|GjiX7OQ_*Xc4{w2v4%5{rQ%+Jz+ z;&11{zM3gZDg7$Hxc(miaX^m0F3dz_-J3n};*jex#4;VtKnb;?YR~4;%65$e8=2n~9yBRN;<7 zv5HJ1$RFRZaVER-z8>As;T75pC*6(<0eVxhFl1L$Q#MhM(l28#uPZIv$<1Aud5F{6F^oy}NB2YZQmi-})37IWw_)Ov&;s8O@~c_l>kc65dI0PuijdMQxZ(-v?@G8o7-Lzw_)zl(!+1LO!HuiM|p&5<#=9q?( z3`uZTz~}lygx#05W5|>N0D6y$1Pd~`S#At=>)RBj@|8$f7?OndvrhUerh`uQ@=U*y z)2#TRQx?jD;6of>0ka_)XTteO!bY$fBrI=;b~98Vhx$0C{VBnrm^xO; z2#YMiYqE|&N!;avC0E^E`;~)(8I3@u<>}MOSv*F&=GlIDzq7K^S;;z91f{F#NLH}h z>DUzDjj7Z%LuW5WIuW?K1R=zySf{Ly(=asQ%8$m!Oq?WyLuN@hUyPDFywSR@8$eZ( z;2Xro%BAom4Z|}P6S=3)RrL$aQVw=GGaRMRhB3L&!96W))b-jS*V%?F@T|nHFNfj* zL|&ZGITpk-{*3+XaEmi&hnn(vXm+C+pC8DL`Y30h(-DM=*9o2xh$B&vcwYNdc1sg5 z!h*v;U4#4Xe9#;8HoB(PNgd{E9CvG+c8d#z{JO#^QfO*OEC&QIP0X*BU727-=+FBpPbsh?HqbXL$_9EUgR z{MIfa$~6NlmDP)$0?hiud;yBJ$2pxu2&0pPGMI@Fa}L=k1ykk_kk46kR#03lGVF8$ zb{)wR;Q;gLh~Xqq5q=j^6+cZvSi2nl%&5wXt=>jUvQq9_?W?(F%SkkUov@kOg}OD@ zQCDH$gayc9>E$?CP~`DVL4`e4#>HgJf)kc#=W9bLWhKXG73Vs^6+Ik+q66reZo{gK zbs&GRTQjN)V-u(}Jt|lE>RX0`?{J7CpCqRC8k{#QF?MfMs0N(i$hN*_|Bg-l6Z<8c%8p~umWeH} zl6$>9csW4l%*gGHv8+;b82X}!PzRM}!*R6wqPtdet0C-k@&;Bzb?O|AwK^S_?VJw4 z#z~)AlKTQ+jQy1{s8a1LWrJl7q{5xrGCrAa*GkUiER)?Jd5Z@Az{X|Fov8)uTdN=v zY^{hX8VQojaX3VSncT+OpHL1+&RfV8RoA7JChlDESaK>kY*A>DMiWIhH9>{FaRQEO zKut4ZdPQ#hkm0KiP*1Z=j;Ef*9a_plFKv{7c{iG$i^9mNx3F+G>$k!ph! 
zxzn#+a`dg7p}*_m=dk)zLcfj<9%h|gVV`+g9jT1T6jeBc<6S9Q<>8!wImPDACQB{@*J>lI0~sM6NUh9LKhKtQD-%vQ7>?>BnwjSgiezX=eI3xh`%CbnZ(0G z+_L(^`~-R;G<;n)%_}AtN!46;@;u54(#8f_6ynDz@aHr2b4F&sVGTB4zN7M$F-K@q zTprcOEFgwwlt>2&CUAuiimAn2$~n!Q=slE0{mx3K5bsAZI_CuoljZxgB8l93@5=*S zcsL52&^*9d-#jb`su8$Q%CYxXuZ&rv?3 zhXGfKqUEL82%9q#6B-qG8lG+a8PVqI%G?77f%dP#shE;TXqi-I1A^@#7qg&KXm^Ni zJGpzzryE#V`uh6s6yHb;P4H~Q!oUB$Zw_Z=K|6$yAOdQrlv?W6>}ey&iM)_`^hJgH$u%udWB&ATR}&I&37V9Iz`nDQT0z{Jcq z%y6?b0-2Q)xk;4)BPi{{WXve7Nf_KhfK|!QsqO_9+DZndGLiJ;O+jNx!K?l@gUV4p z^|xdJpmVo1h|CR9##OsouIt9@b{2U|5)Ls0e4;IgPs|&kv4ZwPOlN$#+lFmdXfV{h z!|5)&d&G0Un)%{_C3r#{o-ojT#M5H|*F;i;Hf`mE)Nj z_qyW!fpjR}D`mMn)%rO&vJ6HLjXZV0fLFNU3T5MCmdvmkJzQ02^iG{aFuoFsq2==_ zK{Nb`&e9o5xon+9`uIXK63}#(S@T{4%R91l!C{9+P!|-@Xhg&{A&P}RqrZ~3@<`2$ z?oCB7QZ_eRp3V>(n?AM09~*{jt+FZ5QTdf z^g0T#z=&PT7N<1MZRCV-A`ljc3!I2l>+8U3piUcQM z;99;oDghR>o&(lBXe61p(`ZCJ3xqu(9m!6;QOa*d?30e|F@cNG)}S{~iBK|;&q7&p z9y)83X)Q73466Am=r$J;2%pX36vVxn2eb0tJXZ!hoZ-tfLzl5y`bD1Ef|nV0)~*r= z_p}NCZu`i0QVaq+FIP!wd}ghpT#;h5%86Y3A-i7dfGR(Z=9kLfB9fFv=V3&vIi7EXqhB%g zLzV_0f`Mx$$rx=jA6X{HZ(`_I^4;hd-PRW!*u~`uIeM;u%Vq@|**?W%nmC zLKL=;xan%AAI*ItMgzY`u6XYov>Gu%T*oDKCzT9r0<*l?LnNR|GTJ>)QZ9556BbDl z&b!u^mW5bm_iWI#Z&Z}IPGTESPLjKYd zhq@MrDfbBG!W$G+_cZAL-v!T*r=#xB1Qi|TGolVH{KE#=yXzXFtscTcU?0T4aHbf4 z5j8P4ECt?Lq8>y18n9v4MSUEGByd*Ssn+-G!Ag`JZ1=3lH1oFxHN>jHxNDXG7%N*P zC|uvLG(q^9XNXQ`BRStupq8;Lzoth9S-d39O=lxc9D$-2A6x{fr-L~1EfM>oSfm6)GG40O>y}^tA3W?@( zoG1>*n-4r+n}(c{D9}u9#so^X8DXy1UbYGm>h*f&Pjm76=M}m6*1XK6-d)M$YyVVS zdIG&E*S~-5y?gtuMB$INh7})a<%Eg@qgfI&&+>gXMu$)tXF(W+j$2^#Y6izZJ+s;A z7(P3@dyi9P+>KLkFuEcUUq?jvJtdBoXTFt0_c~=2q%zfsRA`YF2c?UWh+Lb|GRS1I z1bh+{HJJt&qX7;As#M-Nr6Xt$p)0uY@s44}I7pCm4fRbeeoYv9cd^;1*yII`xfU&` za9h=Sat(md@;h<~W5ZI>NGbJN&O$)lo8hfQs^&7#DlmoD@;F$lp#8vf>rgpdBxy#= zBtZbmvz8kizUH2!DJIo-adgYl-lnnXkRim8k8AP8I~I^3+TPsA1n0)sJ(God1xk7@ 
zLCy=y5yELeL70NLC_q{$Nj}9?2~Mqv)yv)Ojg3MvCHS}Ku@ka?cBGizy1%Zs|HUr^S7@wR$Y8Gx7Jxa_KkKC{Pa##xCXVQ;QCwrtf=sLHqpe)Zfi~_#yMJ^cOcekfXxJ0_z=Kl%h)Rp=bP$STeC!M6Kk=nIf&^8|pYejRwnk z1uh|4dh9w`Dbc!HQWvtR@hz`!M9*4ITY8@+p&f~HLnukeOnG&wbhQ}>A}1HkRJM;$N`vb1@>;1(dzD1ge)xqmEj?UuiLKFsI-()BK#)0PNJM80 zcz#)A7G?KZ3^*i-&^B#Kv*h_x97SL^wbHS@n5AJ~xCc&7b4swp{iynUC)vgK$)uO943p);T^h*5mEq3pXibvb2spX` zcrNt2|1w%%c_^x-PeLNdg){cLeXH%=tT^D_`_~xI={wnF3^6+l0|)#^NJrKZF{{x3 zr-1>m^{v|f`l~sx95SH4Sil!h16!`J4xU@Th-|u!+^HP1Fpx*_^XK_Rq6KYgU#J)- z2NLbjX>PY=o6wzOWghBIah$PeH7GLLp+&Q)bS5kTw*L72`QZ?qD{HK*F*q?`UVy(; zqWer*hswAw-4o5SkZfaV=AE8ml34t&0~l8N6lr186}joyj7daaNlL;Qtl*DpFs;&1)@EPB;uV7-_P_VeL=WzAw`PMg|%DRyf?y}>~1gLi#OWc zZte-8(eAePCpP*Zr9V+}1TZ%G1d``aujO|*&MlQ5NT{op3J9^XO6CjP#6fJZmsNAt zp`k)Xe6FZCMC~a-!Vv>w2*>+$9TX%qLL(-oy=)Ik5J2gs5jM0t0M1v%&M6a~{DJ}t z2!$-WRC9YEgT~T5b6jWvz#A+6j}tn>$xYsDE8G8CNAXIV z8m!*Iv=4_jg8CXV2uXD@X30eg-bCE;;~|^qG!SsD^;yJ~SEp|HA~Fs_x$WaR-LI@z zD!rGtmC^DRlT5>k$uk*%o;hZ9Jrb=QP}NJ;(`4LG!mM#>BJBBSjD1-@O+ujclJ3gI zo3LxH>98^%Hg;=D+X<`%@96s_W8y-NO8%}S`^?WgwUtRuU$T+f;JCk{24>7^Xq0*X ztN_2MqYn&qheZv(I?g33E;yY;7ipaT#C;aasY*FXh5-7yyiW~TGBK4?F}=u7OlAum zvI&j)3?R0_(sV_jXzgkOSAL;|3Wfv8SH&^yIn|ZcQuRad?1F;JK{i?vT(D$x+x`h1;ti?afo3Y};djCITu-1o7NRRZScyn=ge%=weF3 zShABM4s+a-j_B)HZc7Z=GsTHHs?>{StR}S8eQ4^6Y|WFcE!T>y(O=0Uz~H~a9O*Kz zZ-?MyI8!I+$ah5zzLtZV;Eb~U0?v($%`76YbCJjRr7Y~piM#C8sry&ZrD_ep(E*2r zj@^D~XZ|uT?B*(DuklII;$m%lW|ore!Th}S9KU16$wkN}6FIO~nMDgi=;KF4dil>k zJ9hZ!73$7zV3xF0z{t_SvE>!o-P+YJQcf;74^6Y0v6Z{&LWCT>a^?gf&ch3SMdOP( z#9Si$fVjG+PXu`!_ZSqP!nvJlxb$yF+R z*$}N@E)C}{tXQtSEOc0W6?IZf$qZ2w?KUB@HY7MvS5w}}B4k5)q>x}P>}K1MSh6Ai zeH=X(*7#Gf2>}*Z*)C+UyUUlqbyuDSO*uC{u0}pw!jkKXUeifJ1Mli3_jLWtoNLe< zZ1i3fjJXzhsFtkV^XBeF1NO{53asjsC^l#Du4?R*D(q}%BP_Ws&qJ<)p(^J#6$TYf z$155mxt(F(nv)p35z-Nn90HgG@N!G`)KgpPLqz|blGijO%GJm2{~+hCSZV%vwr^(8 zanD`R%Pxz4U09#aoe3ud&2Vz1x4L4|Ro*tk{@T*Iam5L+@sho$D6&}xpamVGC=Em1 zuv+hisK+eGA@_&&J=lBuR(X8_xRN&r!>mJvdG+x)b2)5}hLEuQ$L-`& 
z(NkvTXOiG}3Z}efDF1Rye1r-$Y{Km4iRq=4VrVM;I$<-phxr-kanpoc!tn(_WRNJiXH}byAlI}CW_*QV zbxJ~BQ`(jCD&9jL!)m*$Zar|+8<8oVQha`jx|40j@Mr2&xx1g1AKiahnQ0cJlO z>6%dQ1N~rR6;-Vo7C@ULL3jd(7T8s#ng?dXL^;e5R#JWBdk7FohJFGNDl6W3rPAntdWHY4}WI1$$qZhmDnzc=| zS+Q2jxa_Jj75`*wYGuVH*3fx1tUaP_05rVjhbe8v-p9(8m5q^P`c4&bgH#qk>*R8i z4H)&y_f+1lZnPJT4zaWcX4IU3wXeKa&6ptSl3{G+5(hM67^Q+8o$3PWo_d(chE%Md ziu&kp*6X)o$+>aFa+j!;Vv9!_Ky$ZReAOz%UGnk`HuWO#B)gMs_BMKhrqXe58U)cofK@i`dgDRr;D4xo3j<0CZ z+GnvuZF3tuO*b(WB7WYYuF149Vqhw&y$Pbgf!T@6_HyIppp*hyzg$5_5%MwD_q7X? zHblurfW8tERGwG`G9fSWBkW&gN};oY!=%eW7e?pi3{&2JxgbC~;>n6J;5Q-8vIaEq zP#G=-Lsx{volN;$qJGF^fz&%qBi*=#qht50%ECRP^0*(dGzySN@OVsp`N3vNF%nkC z-&-94Q1wWMAaO+yi+W?JxNCQZy0?tDKBi#k_l~Ih$9fjWe>Myq?;pQEKRoM+Ph!bF z7R3MG*%`bj#D5-aZ#?Dyew1g>LvM&s{w~j*OlQ``6n**f90c)}<(I@?^R3`{j!dl5 z2@&RM;&~bG`}uRG;prLJSS>JJNK(;y6xnvz==u^%lelv$froBBlK2s-* z8;uQcs+kpqs1}3lwZT?7NnPjjXGF&qU=tF_D4f4QTM(94Z=6hj6D zB?lpnv5I4Uqf_K)^${FemzS3vR1=!gjU~C^ywTAk^{a9Ay8|j_?KZ0dX>lTa9C!7v z=RS@vA|ft8=cec6)&C~(b5o+9@}$p4H0o!5A~4B+`f}6aJQ7~i*R{;+p_ZY|x7s~s0Yr!Dmi7|J{yYOI`2>6G`2E4*#mW2A^HMpm1xxmE`C=%fIsO8InOaH$|OfRqa?Qb~-p@v(fTe|+g0 zc>F0dGyP0MHGF0uIx$SU z32xofLQP~v_+Top77&b5GS<#od!9R0=KdX!dfjGLpXX-v?C|u*qy37By%Pk>9%|1& zn;8!sY?{q2C!fY&p0DLL_p=P-Vcn7CIXgbL3~boME~;Jxh5dQ*{s1B}6_5kbWtX3y z5Or}J7ozCPQm<<3tE%heY9__JtlD!EyP_{&y0!ajMg#^nLbh#plo{I6BLW@7m?hUZ z3D7fxEEU)^M||y{ovj{v%O*6+Y{D#?1-t|oBt`=p$wo3_O21RXt977tItE)=o3=KK z3gof`D-ZqX77;=}?VTPU9lsf7(VkR5A}#a@{_>Z<03pySLQWL71ZKS8REAKYMf~P9ROe)qt5{j)n`j-zIqJfM%K74#EGk2yL^V{p6aeE!`3reofkkk~+Ma1dA%5(qM>Y-VYcep1}qdPf$4$Q1gu zn|o6mb3yJ53B>(p|01h9z|#0SS?xGrZfz1keobarif$ZWz*~YL{#fgUdTS=C^#As zA3_dsHp^udS5gT|LT%**l2@}%DMzc?23t!oz>G5{{#vhdG)6kHqH4r)jVgRNyrUwg zxl}IC-~*wctwn9d1XoZL^U-3|l&LMy3gu>{$sf-E{Nbziv+BTUF`-(?vzr#y+}ouA z%qXSzSKckOrutoGoi*+vv;Lfkg+!NnA7gHHK6ckA6??-3L#b}r{aDugubc=zN{#$T&MYBJ#8~)WE!78+r_nNk|S4L0@v!=i`4BUzcvaj zn{+^>JA7p$)eE4__|FL!_rw7#fkmk3dP4Tph!Y9RgWx6CQ0?Z}v$?fha;VcOfRIDy z8wiLpFNQdK&ebu~i|Xi0vk|yIDn7gU_|dmZHNpbwOXe94BD{sADzH}|S_*onbUpoW 
z)6>)sNPSnMnUZkU<5P9nfZJ?oVQ3PfMVIC8`B1stOvXvah$V)u?Ad(3nXq(*C!{Cv zL{=z^5^Q2u=Zc+Gxh)-zgKMaK|tApiDmlZ*-$NF<95BMo@X1IT?R6It2R{ zo`v-VO45dGx=a-Jg%ei9uva-6D~~A6P$sddsAeQKc@!L1(5s~uS1jl)LT-`xT52yv~Z|56j$j zY47An7vjC|F*jAU`>RU#p~6h{=YuYCIjBWt>siU>n(~@9XyzJ!Z$ZV=^P4`X zR9(b+73@*L0zvgrFiI%%OvPHah(-a41lp`%p`6~_WMY!9aLhR?dT~p)?V)7iDa%yY zeL4uQtWxe_6*!Xyy^#6lc)wVDDZBNa8w0Lb&6LT4ffY+sWX4&O0MdN2oJ!bn6tn_9 zmsY@jT~q2z6M9M(@_bp!v++*~^N?>x@nC$4jat-Xdd;ofTG_c=xGqOm(ROA>j=p>u zE-0+{$N%C}yt%WxsIY>%YX|9Ae|#0ga3G6<9AK`;@q(r@>vhvYwP{iz`K{}_RkB(# zCQ0aH7uHI59d*4^`EO3!p6jM(TPcjcvK&}INffN3ZZWn__h;>2V7*erOQ>!GJXM54?6uAmy8+I-b$`p+9n`*hhw=LX9y_~zB1G|b7Z(!gym~khw&aiiW z5jSEf-$l`|aS@qQwwl#cuEorjNm_Nmub#M?{g*c*Ek$oHuN>KnQ7TjnM}D?H-7;C2 zWm&}hazR;0_M7<6+&Qqoe9Ys%=SDiNI3i3}56kl9Tdk#?`C_$)KoyL?0qMU7&`Xo~ zTq7qX+TLnCJS+iP+()ls4O@zx-#hOP&%b|vP^7nSkIv2u1>f#{clfr7E;oUtT&6RE z-U3^h-+g(b0}K~G?!EnRScK&BXDw%5w(SCnvd&(=e|vCvS{iX}{K&r3Q@7C8>EYS? z52yPjsJkO3rVEf$8S$qK?uX-}|MTGx66!z-qc&4j*L1fIH!bF`;NV>b)FxP3b}^M( zUKY-LaHbMuXFnXBT%5l>yZG_&^yu}!+Q3(Au?C_>w#&42=W9~#=*ZFxln%nSYgYqb zf7+ELDa)R9Sv>fslkN|4(ml%Ik*%!a(Q#+8Y@7TKBgz>nAC@FHwfjVnH9@vWx*Ium z)@&^?t`J%`*kKe3ww3y4LQIo*>&uspB@p^urT1S|#r&1c={XOBUl6%havaX(`>xk5 zH%8&p5%gy|f+{&lyJEo2?;R0SR>Af!anG?0w!Y@%Ydq*Jr?Kf}FFDP_)sgq)D)E$) zSVQ4bz)JMxi*f(W!1@n6n_a=&1K*b#8veta&Ta5kjCfhMwlDUMkKdo~ogYEcm*tmv zu7}_4dS|k;)N>Q9wrtrLuXHLqX7JS_t_M~*FVM~|P%Hn&isbgdEw3!DR2CI*ZF18W z6iGFr@2%>Hh{w5+^o&g=Qp|E~Sdl zxak?n6a~^PECj(OAgBN!OH0u}_$m}2`zF}rL+e0tsv4u0$?SQ%-S!HT1~%jCQ#LiK z?;WLP7TWlkQ^r+q*3~SiL!Xrk(lJ_P30j?E{sXx&WxKy{7IoK3ug5eb-L!1GP`{t4WFb3M#e(jZiqz}PDy;u&r@_g(l|$?56}jQfjtI|W)Ev+oZa0!~WWUWqE+b&s12@}j`;SingpVW8lguQL(6`~o{x9o@wKbIpBk!ap~ z^ZMfKaR0;U(fPkF_72`19bbGnJ3K8TySiLnuHc$=Ae&$R2Xa%e?Lg(xLe!*QKJS#X zxE44nJNS_gMC>RvuJc#9OIF3JgmhL7th~mYykD*^M&HTa+1XF;PY>3XDSQFKxC)-F zzRDj7^-iL&GWu=au*Fkg_lC~%Uxlut#Hm-~T zHwkMC86RYf%TGY%N_cmE^03>kvFGX;g(?+$sj!QRU_bb>y({Q7Yq8xbEJ*{cD{U{*WfeIyun3;7b2O*AN4>J%&`Ev1pD7OW=Z^q^<;rV$YfhzU55j|5N| 
zv#jSxyEiLkEm?D@(r_bJqS0a_H#|HZ(})VVXPCcp_^NNq3H(+&!d!5;MT2zX)K!kr zEl>-Vy7s~qr9#4E*%!0i+p{doNZac!T=m`e?5C2lqfz{vr_6N{&k8x`0vYGs5GBc@ zSJPsZN@>9W^MYM{Bt1(Q6F}iBP9{eLZ$hLkZnmXxya6ThD1rVIaLbOktVt?qkxYj6 zjL|C9ui@-e&62})W1XUAwA4yV2EW7*Lg?b`sL`<^Z60su42nxSu(FtRkw5h+`FG`a zhz;G1=YF@a8C>0ytss0nUCY}5Y7subARin{6NLN$j--(tz84S%xK-1X}snV@l{y zZ{#j!3n}{C9A)qhwuE&;_)?$C$BHxA0Vl(0zFk?P-ejZl%Cnaw*sU}h+YRu+XbAzMJTYFEU-&O8#($v zYVh_4c$SI(q~mBOEF_>4Y}?0z{9l8e!B!#u)7Id{Q~dWwc`6t|2o$FSx#koK@WIN< zg0+gTDJO@TVw|#&0MB$qqhN@1suB6NlZD9}qNcEP9h09!5+@aPFf+$1v4Not!^>4=Q@dVLeSpH5t_(?`NWVPyIG3o?mFtKL!LwI zjyXw2P-H@c{01%s@{f>m(fzrg`OnRCJw^(LM>f+NsO&1EnO}uK^s(5b`s?@J%6EG1 zNdHH2W}`{FV_*^e-`d_N(Esh7&8PT(kMi7${#OzA@6^h;8;!pcP1S4ht2-iiI+7O! zElE{~IxFU+ZX9!M_(M}qr*&_m*y>1BX*EUpL^c+(tq%12oPC)bp`>1|>aTR_nq#J6 zQ9XsNUE*1a{+koJLWa_G5-gzqn_IiZ{7<`EJ6lim|1qAW#B?rApAQV}H-fW23%6d9 z<}FWa4Hj~{V~B1&z=w;*fe|$+TU>=OBQ1H}Y-?;=*Af+-$`Y|;auh5wRJN5_GNENd zA@?9?s;%su`RjzuUel29Gm)geNE1gkr;4;6jrDkao>zj{2E|Rh(Cyf^mP=!4@(&)6 zkceW4{iasjdQNjV4B7Pu`T^1=^VY59t7Xlp^vg$)AjuquXT)bw(7x5m+K%F6Ya{E4 z#d`Y~ZBgw!w(h>gN-#>PNB4OCquLGdF`gxk z^ks}wmpw4QHikisxyc4t|A6uR!f1rL>)oo7IWEL;i5;Tb2ID&k)+f`PlS?I{)>`yq z<_!3A(TanFWVWvU_ahF$BhmzPb#2|`Y#(vdrO_ORH0WkE9BUEz3Vr#4UUjMr8*(L# zDaWd@K-A4Cv%fl{yNayHC4-inS6_t<%VIOel4oFsJ~b72$P2cU+iR|6N}bMofrsB> zAWIdBAl5*)99VTueQl6fSM#?8lp|HKI@qr4Q!b{Jzw}d{K%iz4&5@46%ZWf9Nyk-2 zo=xtJcgIGlKGd|Fk}YLJP)mMAb7Z}^-~&}@$zQ#LI(3#@ZJeo_hy1Ku*dnvat$Gp> zaTHWIR;Nkm_*MI95+aXZjnV(8i;uj0MZ_1Lpfkc!fp$0Sry2ePud{c#+lt2WXm{IZ zk&vrRcjA4Ix_iD)VlhNGjzj9hQP2kuYTJ@+OSdD~6^U|9xT2;ZQr~T;Y+JtAqBB*p zm3;-9cMf-$A~^qmWUfTK`f&c*d)ak{Sz)QHI|dYf`I1HLhBxpcO9a)O3K8>R--c}P z*{qLaTJ%R(4XD{uw)x7nQGRthHScBHSv~t!^WM7ifwRYLVC}B+Qh7P(MlwPz&y-ul zNr0aBJrDK1L7vzD3*!B5R;D}beY2i_;RG9kQ58~MGQohn^c`OTsW%z%`yi<lrh>u=Yso;qwN$FDqd2VQCIe2Ok%po`Jip@`#wO5g) zu1%BW%yl`K>&M{cva4z;mu{!bc;qN|Gb)^nJPsP}mHMX8LvtdWLgD6|J(aL`3j@xR zi?3x!B`DqLMs2eGR4llBp1XxJUdr@?irInMt@z4zig|--hgw|xs2EGUyxg14jmr4q zgD51xnk`}6aYi<9@K=Y@vKSq2-xsECc)#*(vm@0ph;X<-c3 
zBDB`_wFaWnQm#1@^`zh!k7!5*C50JBC`ed5L?55yFns=V?i)B7og{QlLoy+U+{d9d zUj&1aTv-z_|CBKSrarLKhhX*L6s*b{WpLz#!taY&TmVyHupmSbvv}+Vt!`iwvyh`X zv{i>Gb;8~@ox=ABGy1aS+1U_!w7}cez_{qT=tLmZv)&wsw5hLq#UyQuk@`5L?SOP5 zl$w>daBZQwU{n9$($0Ix{~{u;S#ssYaGW;C02ldxZSU?B{l9kxPx8M**6Q8Y;iS3!q7&&x>8_ASB>nZ#IAJs6@MQ&ZF$>NfLo(Ka3g?7JhD0v9=4O>@JItI>6k#~O ziHY@VKeF?o(PcgqSXuSrPRwT&<2lYHeeklyke>|mR7^^qiD|ilEJ(#!Z_&}xTD`$` znHw!EfXXSapZcoFuIt5$wu?S|4@+4NzZSwm9*wS$JXsO!iKrBa%AuJH;*gXTb@@R- z#Nmn%rV``L)MjU^l8$^hIB8qdken4HaCn7S6yCTq*4TS}=8*-|O|U20_NcV&_h#B2 z@VN*7-`Wank^R^1b}|0b&S2*${==g@chLVokfqiiMelExd3*hTp~hY908BFvXb(^{ z`D&-F>{$ym)xTqqwvazBRQ>$f$grJ5@E$VjJKYj0&rq8({$G#s6!*WLVng~(gpOyDcpvM8Kurl)vKo4`z%FrA zEx9de3+sNtqQuq9XmB7XMpa>X52&jE-)toa0xhfXs`AcPTO%UZdnZRwdf(B$^!84U z^jXMz-(!xtRX5MRmUML8YzC%DuW+JPUR^)C=_Hm1U_=CVplHz})W`*Cb!(7Mfrk(w$0%AX`Gew;!LG^u`lCU#7t-tfukiQyaBYOV+iOkGNwJXsP2G0+x%(SSoL##dJj`knCqJeHEZLuaCm_*Mqt05uc?4~OzkQ#&B_zr!^EjRZ=AyH?gj@Zj0DCTHp;oe%H zH-)c;IF_0@&&A3tU4Y%!BkW(1y#2%Cj;$G5v06Qu$UngDmEIy0rL&QVwBVjQ)z|rw zrHcdEaSJ${9@=(Uo5aoq#;aaJSySF06YWc_x8Hw`ib!qPXRSA5X#sd&iGScS{C_l| z(I@SYyUZnEq5k(pG5*ii=H`?C-=jQBN957V!c&VOPS~d#&*q1HOs39)B>{o#uQtH8 zWNk@u5hu(ioD;70Ax;JR2Ye?#t_ACP&Ui-4s9UE?HDWn&tY2_J&+Uq1Gy zMC}UoZ!f07Ka@2(nUhFx^x04X4U!IT2b|nsV{=>e@kP~<)Zf-Hh|v8J;p?7ebf)|% z`9E~gkWDVeESX_}K<-W>^!(#9L1*M5WIhg&yp0JgQvNe~mXH|}hf5Cb}Xw{Nh&@kyo=D`2TRQu`xq5 z!oHw$@@f#E8UA#^>96F~U}Iv@A)n%q*K-TG;AJNfmN&nNP~UvFpe z_AQ%CZrNmhdrd~OwP$O&)r=7N6w|~INB#Ugzx2Lzihq64NA~Z{xjoFY4F4aqAfP-+ zAyV-u4JM>rAh<>P-_6~k|M&Kbtta{aqda9RiUT{v-@z0Y1;NSy#|nYfjp3wR?>{ZV zn-Zrk#%FIvqrEw%A#gVUzxyVir@ck>`an#TwehD95iWF}d$$;XQWH4|$8Q)f?)Mg! 
z{0W~W>A&$&T4?j{J`3po=EiO*|L6AR;EDb}##0~}kW?||@nN;;PaW+))63#M7Ph}H zy%qH9yV{&yNoDj~|ZS zzSV!89iG3j@6Qg;Ke#_m_V-Touj9k>i|>w(4=&CQPk%hxKXhK7?)_w{{Ojyu|NGPT z@6Yv%cRw7zH|_0xI5;}L`04cM{4nd`wdx_~0p_9Sz7beHH*buumwgc@>=)t-64*CB zr}l3ZThRUt*mZP`lVI=UC~LrkGZQ~T?-21!ewCLrZDF3xlze$~vY&r(a&+L#Wg6}A z51izqFouuo_2 zpY~bm{Ga#T`Ss_5$LL;n*NwvPGhK8x;ugPpA+{oi@hrto(lR87aBhyl3wQMepQ10wURMwj@+sw z{r0OSwu<1(>VF9OUfSCa;F0m!Mx3v^dast|#EvO|gdNtRx;|%nbupU=)0WG1115=b za!%8b@HG`qUYk-b78iI3$5h`&6e<7sJhL5!_%s>mHwB84B}!DuIY~yQlq#8Wl50cK zV_D*3i)Hh3#RNP6l3O5nt%RA6-R{q51$Db}flwdvfQuW?OGlv@{f>m(JhTUx11|x9o5BUaZOXrU)okrEfMsw)Y;S3 z@#}q-J^x2?xkgKw|1G@#ZI$9bZw)q|&i_YwstCG1`|owIf2Y>Tf9&}#_sS_5!*xni z{UK6FbMwe*@0$)n4o{@#qy_I~QPmEudCTgO(`8j3Me%hy`i1!Nn&zRL6R?gK+YC-a zY}sY!`#~;0PtQ{HzmR~Zbsr1p|HjTvG5*))i@}rr?@^x53v)Nsxcq>?h0j%&+{PIKt+@@?)LlcXVAoVx(4>7`7~^TFs7?#wIJrSeY6tL^;ov(Q;>agS z*9EeoA7Nd>>n7G|%s`WWSJ`e=wWcs0RrSt55PvE!4ZPg3`*ql~>grmo)69D%$<0FS z)!T%dlH`*hNrc0r6Fn5?MHBRHy@tpe&C$JU%-(#6)?#LMQTgr%-RF3wkJ1%g86@Kek<6y{l>s-P&v(rb&XNgg7H=+}6meR`I9 z?!fKW7484GcDJAG{~qP3J|47$-e`gr1OT!?O}_v&vVWIdd_ZkX0j39F zz8+gi@pX#+;JL&8*F_&}|8H&&O7UM_Y-~L3|HpW$_y0RR#ZCm^Vz~Wm!SsBV5XO5x zoAryNK{s^gn`AS1cE43^&vtckRN+1*z>8oFbHT}hjjQCa3*~G2DOl{1D2N%=8M!3f zf+TYsHsQgPX$L*`6C9Is9i~NB$-QjqtFm?>=GK%&hSPL%jy2u~+^d6)#8m@A>;&fG z{*+YTNHTzBiFT+FAy@H|OmX{S*mA0~7Ao=|6wuTROG04T&}d>;n)9kHJZ>w^>kpw{ z*a#*~r>Z?AA;FyVoP4wo?+^U4u-z+?UnsLc-GQ=3cR{lR;16&@|F+Lk=fA*$jMI=W zsRUdk|9w&N|J~Z$c#8kLN2N_kQ6l@|Nwgy8coiaNo17cyYq!G$6^+ zfY^>J8f$>%PiTlxMlgJAk4nejrV43Is;Za;g-`~qH-n)&wYxHAK_#)6XMUSzH2OkP z-294+Yrncur>_5Doljnc)8V~6`VcC)C04m6wsudnjV|pR2wjx2YAtpWRFT88P8Zt_ zEW`f^i3Rw#KwTE_7>ne;FJ2V$KW)F*e3Jh>%2T*c%E^75;2F7Q$<@LHP)%`y+b|2V zx8EshaFRwnKD7~p|4V{`y65uW?+FgYv}j@Fa^HkVaFCjc;5hwCZEKf(|c_t9+W+_R5C2`rz&#wgmmbFK%ab4U8Wlqu*U{%^Y!|7+*P z?vww|qdZH~|F6J8^ojUA+;g}6|Df?7Hg|T5^1sa&J5T)IqdfPL|DUknehl1wXh2D0 z-<^_h*5lLul(DPs5UGPI1x4W-R`zr@Si} zxX~*~|GH0$f`ue;?R}gVOSJ2=NMIU4)ZQZc3bvJI56K76uT3y3R|XXL@?}_CK;VhC 
z*R<`)Nmt_fg)L`Avo?G-n_)>MK6d*o7X1+xyg#B5{Lx)UT@L~k>QhDOG264w^PLi*f zi&%XA_as@RlZYkcj(sfR|97^E@n1Fu8&C279_3jvi*)HI{(DT&Z1rC~s$ z35v0Qg(rmfIxFaWN;&A5z$2AUNf@F@$VSRGg+`NgFgO=}@P)rgO(unjry@RuhGrCL&%0@CSeU00_^1A_aie8PU|Kq8g1uWhSWK8yGN>EYhN zyTjfrxN{$k`+qRl+TAGVe+Dl$x1Qp^J<79!-l%y--;;2L_T}7nI^Uq9hzlHsDk$+a z86j)TDM^A#4}(7Hx<9XK^B8k9vSiX<>vUFD&>JGqDTx_6ipH$dxxBpeS;SdLI`UXS ziTs3vz@!z2?5EsmX+p&`9rY;d%krLDn@-&{4zM7y-A?D?j1ctEep67%#>~YaU_OT* z@Xv)T)L%nmmKbIQ1eK=qg!p)#Et? zq*jJZup`IF7JV`LDEF2+Y zN{ckZD-R}&B65vp_!pL-xw?mTI(zzs>OjAizvRtiHKbPr%?G{B-WKw{Mf1(xMsH&+ z7j7F12}(i`ugtb)(qiRZ=eCxrhciq(R4bu=mg6vfN--O*F3lA zfT-l)vXjNntxaRL>~q_>_1Ztnl)X!Nr)K9CT^f&#%f;hAPG=*M$Z3qgz+ppx7v&he%=$TD0m?D}qZy6pES;e&7ibZ#h=zB8tRAjh>e0(OxbP={D~&24yC+;3 zAw3fb7G!c$)0AAtrYPdNY*Xv#@-ym+o0xQmsC&wQ%71|Kt&6@i!3%JObWS3|`ANb? zq_H%5tBWEii#QPioIRW1l4t1~BDzEL8FeLVFM)JzUDr`p-U_-yw6(FZfxe(Fji{hF zJRl*yIU_!c0^S{>-HmnB)poAg+b#KajA@uAj1x80$c^DTRmX;1yPXI)xg!Yc6!|*+J3RQj?^1aK-*Vi_qSXsY6m5hK^Ml& zp$@cL1TrNyMkl%of;w;naZV=@idn#2!0xkyl(0~8UtHcf>h_Q9=SUVsSQs*&Xi=HgX#4ME?C)N>AjVbJvSNAD_Oig`=g~%&t7Qg`A{9B$MG?SPQ~pw( zGIzjO4lWaFt^r>a>&w)thU-40zaoNWbz8T_bO7bjC)T(AF#Azmpc;9L{gewfGgqyJ ztv?K0{-Qm7tAf7B^`*<&**rR_hU+2bF&|d5qPbhyJr06|AGl@j*VQI z1Z#HzuNE#h$fk3d2`LwnP8^+}{iB0Zu3S7wpxsXa#8CxYAAi0FU3J1|)~$OpZKkM$ zz>2Hj9RPCSDu#`0pc8vRKov(}K~!3RF-wrnedg#d?+h5gHDz4vg%oqL;J$n-al^rV z1*S|<{opxAeIoq6Y=`H|-wR(fn#hSTmJv<7zA}*I=txyr)H=EZ_n*76h#IaIbk)G) z($&>DsCE%fh*fk3Yv}&;%fR)u(NzPNyyq?8b1c4mpTvR$O0U%bSN|Tu7#FVdCf+cE z3z$JRZU#%jbpi^vrGd-A1^cO4xKP5EtOLe1HE@0Gf}qkPpYSXWCGGZc*nNmiDv8?( z7SlS2Ghm@8pb)4?oRFAAf&7!qNpb^Y(8d8;2HHa(I2os*QkI%eG@77~zFNszM63ki zdq16_y|>@l4&}8!VPQy;OigiRIWdpZ33qfK_?mk-9C>tGxF>OO zt&|oJmtz)Ksl7Z$V-{!@34}zY^7n$vF-cl9kLR{%8|dyikH-G0g=Bos7;agK-U7Jp zgD)ToM-dhKmaC~hRH<;7AEUd`42e;0N;n@1u0=4Ze$GmKSrcdv3YW^_UkTkU(jS!$ z&c_iN5oOy&0z{(Lo|EDq3Y?N)n042n9-ScBku;16M@=xVm-!ffHnl3vF8`*A^EnAt=BC=gQxU$t?zy zYhaGf4O5g=L93VX+`mLDL6=S-y2~|y3qCFXl;Gm@*Cx2m=&yOWEYAE8(^;wf9W3Z=F&LSEUkkFLO=I(_c^A^NmaKVJKru@C=1c~PD>fWM55D$@P 
zP7|i?m~)&^CEtxP=LGIk#zx}4*QjU?o;I%&R^rr(a8*JyPS_0L43%mf^w1emR-t<* zM<`%EM=U}g4`pv!M^Yt~>k!UsFCp#b>P*{XC`--o1}Pt9xLATB zSXQ>)ersk^O14*v!3C4`aS#xDPj=?MhxXNl)k?~drYL3@ZsO?1+c}9i7@M|QSvU&R zwF=BV^=MnIwd@wGU{Zcn^{PZi)nnuXDrTYS6_^fg%imuG^`qnObr-lam-gUr?VC8K z4WKo_W&Ozt5Z+gsai~MCHDRm?E)%ltLBP`NV3qHgR`z&NwZZsUSXrhGW z#wEd&zt=p<1Fn^v_|_UZXJRp@GFmnJahkAnGDRW|*X~B^wUk)UF>RFJ)x%{gSC+rm z@nao5PIS=jdhJOSzI0_|LVoiHhiehSZqb1)sxLpaW;PEBmkozoM<=oZ$eyNJ1gg#; zo+0oaT$h5&c+k*jfdJ{&?6>C}#R;9^J{=u~Lb493Wx;0QB{&(bBQd2M0h^*6J*EU*ksDPI zl7(OPax8_DD!lzv0@fDj;O==7T4-&M_2XDRw=n3s?-FYv>o3FA!t&t00<&CD{zhwv z6Be|ze0vzU+NehiTrEvB9t5tFMFyX`A6G$=>9jKIPosLXa4l&do`XxPrtU1`ZiqwrF$f_f%MFxYX(WA)^0I$!i)C zxJ+N>;UYr0Bv!3s^x^pE|9m)vq%6mK?+(u@FTkx1^c-9n^HjrNWLA0LAE^yxuW3LK zCpht^Af<44Fz~Gm0^V1C#=P&iu+9*TEUhWRS_jwJzG15`E8zlJg*>a-*lNKIE1jE1 zk)(GK-PgfoGxS6*vAsoVk}bEBBd;Z}P}X+UDDMRqOD5D#=Pf!43^**A;D~a-+PdWT zucy>kp2yBa@+nKhAQS7fb(&lV*O-ObFmKUYHlaQak$NpxhyuqTZ2}R53vB)!XxKuy z0?OrzI)L?hiEh!JFX$W#s1_1`ov@iCwfsyZ zsV~yR1y*6uNKFJYDdU8*1rRqC6)GXz350RzwHn?XRfToqyT-zwz9VAFf}F-gy^>={ zmFIsz1o1_7b=I!zyoY|GVv2s+JKz6aiFGx#yryA@0*0{uX4Dz$O15{t|VxRpVxm-&J5D;l4N{6~_|aitj>9Ec!GPEutb zb46n$LU6vPV{@yO-Cd=UNW~QS5PxG?B}W~?foZQ`P=WEcLWnV)pc#&}(Q27~n!p4u z9o3}m!j@ia$JYcS=ss!z1quDVz-@!gIskLT-hu74rS|5o0T3D}Sg44t&%zLLW4NHM zrcp>Zuk247bgAamHHdP=iCF8YP)uO-4J$b_po3M~7qhru5ScZPD!Z9p>QS9fUGJ;% zCJPz6f+b6h0W{2oV2I60l1Q#f9Wh+c$SiO`6zGB^0lKH9T04~4!RT)W3dz*0TT-AS z*@7*IE|5lW0!&HcVvsyyoCY4Ijs%XU3EK#0m#Cqtx1pbX|QLX05iBF*cf<2(*7VF9> z6pU;vCFV*1V2sgBH9D@vhPvt@5-Lf^rM|vg%0XV%!h245#a3;ES6O8fE*=4JI7yWI z<_P-@GheX=B<~@?J^gNPzd_@1&ouyW+2gO#$6vw4AQ9iGQl zQNNv>Slk-P8&(P;dqzHSC2<+3DJO@Th{#rI$h4L}VU#eXurYIQ36T7NJdjMq_P5eU zL=)tpQ7RA@O1h>9yqg0E#^R)fGlrA4JGmm{ow!%A3bK4+i?LnKCW}^qC(JFVb&0%4 zK=&NQB$-jpftX>Au1Tm&x2l&(4z3TJ?8Wh3GOP3Dl?=3s&}qscb;7}JsQ}i7*({BH zt$FQcaDfe=ob1yytkVwW%>QhB49E2oR-jw{8f9@*)eVAtB7Q1oZ**h#U}n1S_=DU{e5IVRt1_)rpMXDQ-acALrE)M5Wqe#9 zg!iO=7133~bR_pu>p%nY*tuEyAu_cfPf*HOrDaNz3ds~j=vyc^UO&1~aAJZoC@ 
zT#??r~Rnfc>Qw3L(*%ZY5e6e*`vQh_E$#|*?t|qgodzwtu!BujAtAeY^ zZ0erm&hDu9TJ=y;FCR{7PCGoX->84*&g20Bzs;F?Ge5cZDG@SFw2VWQ0 z**KJ@K2F$7G5{$@EG7w-AMS1KT!1SoN;e(2?0}VUvH&9GCV_#XaStOHFmORo!#S>$ z4}Z_D(HOM*kqB?lNromZ6Xt$`t?|Y!x*RmS(=7v6ecZ5Hbb`ecPLd5qJq!_qFsJ$a9)>k=J)F@>BmAwU% zD2N#~BAEx2sp%d<7h<2 z40PiY!c{1>rF^_}>@Q^`_C4j2c_@Y)aN)|bDX=g=r^+-+*{3p;5Xs==%3lud5vUjr zI_3XKn#jH5-U8teWHm3*_1shGZ#997YhYM>DpY{kWXFD6gl)Ny?6qJP0=0YjUM}mw ztiJlyfVs%9eIb~T`L@Z4w+_r|EB6{O+Xif}1#=NgR|{r!$o6V5vwQ8b5E?M|!fSlf z6tJvn3)Oq+-|D!HB$|73oG4mdfc!3CE+X0$V3w1=UOFw-Z|`!w_F&#&(wlNsG=sS? zTSYCHcfe*-_}?Zlzh_(^U9mlksMYyX0cKNzjV3TnJ>)}{jitbR{`{_Mvod&o2~0P( zb3^QngsTtV3w^^n!rrClGTowm}ZJXNH}J3UJd5UjhBsX02N^B1M93Ru$1+` zgn&}9z6cn0+XyrC5PTelBkW%xc~4$|&E+S($#?EH=g(2VY`xc9^V8zOPyuEWA9sr) z%eobS)CTT|Yc}qRkhnFNi}7+@BejGRbR7-Syi5G z=-@&ws0*HsE@DWH4VkVr{IYL-4Y4V(A&R$2Fy5_(M>elo>t;RbJd_!d*cg@Kw{$Er zyz)mk?Y52>0&uvAz2`XThje5!(7NSxJinoq81TwMxnP$=`_nUb4Tv475N@-B!y>kc zHjh-yh)fm^$uJWF5%Mt_lWQ~sPZc-9M34(Rp-+My`i_L`8Y*%&BS_;|SyGZuafl;r zW`m*LlwDiZJocx$pJ9g8tQ(0;ZC0RWQ+ALE@J$Z$UpR|~=+-7h|Lmkk?>crq-C@W1 z((Cn{Uyv<1hAg47Ik4Ubb$+3S+q9q}gj{xKa@Aan_JuYk2*7D zcZde|^%s?>9Rl^1$Es6QQ;x7bf3bqrT3F^B0|S~spza&@yULttKk2q*D?_JlzX(K6 zq6P|QGFy)eoaL}zL8oLqEM>X%+01d)=x^%MlZc3;V2I3bb693?G9%`pBZq>I5LdB` zAfPU${{&y_DDx>Ni7%P^NB}hQcR#!2_h*<3lDI$?Ms0S^cY9}tyW6>)7h3yNvK%s>_HOQ*d{KWVva_HM0s7CzD=FbuB9n zzMwj_wBtusFptmqCuk%Kl5okjDo5~$C~mETRfrOKpax*y17Os9MfSibSFL8Jf|xS7 z1u}@_8GfBmAxP9w=Zf+Z12zz%K<8|RdT3wys+!GLXi76)B!C9gRZMwccia#JiT2Va zf%drwdY#T5N{G*9GZF<#XF|E*8d&7?gks{%^14I`MhOX0-*|n`(oj&z%jNvnB(6I7 zmgJnJNusoElFY;ji%AmRpczdP7)R#*(d%>!k63g)b66Q1(;w9lKSc9E=ZZ$bFmuK2 z%!t79gp+5NVi}bR!7_5$MnXC&jXvf|KB+_09dtXUv8$K7aB2_Ht=B>5W3Siy*`8(j zonqaEnzu@Y3!2?45ky|S`%Vcd7iyoF6qTziahBD(T(jw9s!ve8KuA!ve>%1M2QAz` zMR&I94b)K^$bjnX9@Jv@NZvr(+4ax|u1(=NL2~7&zScX6nG@IqIdvV7bXp0sr+S^v znFEP~9dK85H>XnhDN+w?z=-%*ZY|em%p5R!c2Jx^cm-Hv2$IhmZvu7L7<+vP#G1hY zY5L`9jAOZ<5{gAOP4*Hj7@FY-G#V18&hE;LhHwxdH)0{e0fq!jAa3}m?;|S4>#z?& 
zU5zlS$C61O+zKcps-`UJ+c*p~n)Fxnt;Nfl_p~dMoQbvE^~B<6*l{t|lb>OKy4xNY z`+ObEDgToTc`02tdFg|1b7HHkP7j^x68cK3XvLcZ$5SduQ-cn+#7$gm3Bk*uTu}>K za^bGOL-W2golaf3KZ^C_yQVtYXFFHNe01uK7z@AEa*w_ag@Q(yQy&5e>VoiMFBQ{@ z9<#!B9~WzVsD{<4X|PbNc~ZtsCB#6D9cL*tB{(1n*VW|b{p@S0wHZ#XG!ch&;%=FD z_!DpnF)Yi8u1fiVD^Gy7W;p&9jVTEOov+6@hWIR+69^b1n34)OA&O}PufQAGkwYBl z6IefHXIXO49rOm>?7Ij#%#V>=n zb2%m9%oyces!J|bSjzm63sWikfmk>qYyTk_CLEGP)C{x0Ky@?&g)#;TTw5Q7i*on? z?Q7n^Je4GO%+KV|%3!3KnF6eME>=8)6_=B;v*ilSt7>_QhA3u*huFV@-zg1-cFI+e z6-20*lBA=Vf(vTJiz|p_uT@{~BwzbJnJ6+t|3CKLygPE+SQO9S{1mt?{Tzb)BjqQ*=XRIc%6beA0 zs!)r#GRo$H+6t0HSDZ{~5)tma*M^zlY=HXexYcLcm7II17)4QVQA^Z;%A;~)RnFsvWD zCdmM8d}5!5=3Sn~3`bxLQ#x@!KuxYF^I+MCci zlEyG&XvT7*oC8TAC^o`^t1RdGLd|D6bA?G=3WgQ~)JeuP`J|3u_%kpC%GEfL)Smb% zt^WXRulJYM$p!H_8Iw-~)RDk-JbQy?iNpWd48GZ_Ucgn19I(b2^zia3SbBEdL7oMfDmn_?ZAuHGO1JLd%bjm*&B;eaxcHXUlA@?J|~ z0fo(QeGo1EP*7iWWr>21Wx1`Dr>)!|O=6mmni&Gm6VtQTfK>S^6x47L3ZhEWK`3Y- z={seX*{C&wbP|@sEaP;TXN3;TEFFWQBYlcZXLjU9oJ?3m&hlYQ#Y7GIoQ#TQ^kTif z@k34`;g~tdnKo8Ake9_jP4o<_OK{C&2}QMF2059sjFimTXfke`zr|MG+ryX*?bWhM zG9{b7vW;HBqPbFDWWh9YrnqEKRA&wBny-wpWcuWJf0-3pv?g1)NGMeS#FxTI+gBMU zHsyQ)CQ=}^3u`r0GWBXQpE5BWygz)^M_nUwzc2#YhIucJ=w#he<|I7@e=X1pWyb9`~t$+z4}&`m)$2-r4haWMMl?YBZF4#6gFHa0ZD pYxMMXo%v zeZ}RN6ko1d>g#p-F;i%s9bRL@6BonB0)q zc-MLpCc$c8jtc!j(`NIo$qY1|1JBoO$v-SgJm0Z0^n6G0;Y~5s+&ok!Q%vIl$}p!t z2Xx5sgiinOG>?g3qfFZ#E8Ibm{>yKBMXf1LauWYvo|ADva$9mLUwG>8229(_T>k+B zw^t)OL4P;se`OBo_x;?9{yW;a`}DDx{`Yr#`!A~We`mkn(*Ngp#xWb>Sd;!N)_3TN z+Ul#|b26qPDNa2W^xvk*Gk|O#OVSY(~VmKD9=6Qco18fy=@H18i;oj zJx3_RV@b+^cq3Fevks6OOvLkSTqb>KG(hh^cxzT4&{1-qtgqEEFw)bJ34SO+*LnN2 z77mZzsZ8700_#q z$LSf2c!sumRCwlY@W>SQw8jXUm2JihM}NUFPD1$^wtD$;1%1BrJf#3o zficvZ2@{!CsT7kr#NhzoJRRwsqvO{7cSHD2NqXR`CJ>&w2Ymi$l{D)%M5_dFDj_xp za9QPBzam-7GXvHXLs{ed=sZtAVa{?0{h1N20{y0(-UyTsBDcMd-o1VOPb9G@z=W%a z^R44{%T_m)*^>*23AJj&hHC?LND=M^Qe<4kd(0S;Lq-|f^1#~4DP$&~?31o$@K&3f%HvoqB^^1v!JTnwM6i#5SCfb3__n^Ng(HUow9e|i; z1GMqY#(mn*HwjJ~1GJ&;F3dO6o;pmysLdEsKcB4=lG4tVtp07-CnhvSlPO 
z@f9_Z9EvYUtW1BlJ^A|relo5|$J!b?9F1rqH?Oq{dRV`9b(M_|R3kvCW%t`2*8N!k zE?G>t_TYWeuH~_i`8bLE%yzNu6@TkVUPDe!>89YJDT};v=IH`?0H-8_)kr3?GKd@$ zI2vya)q;+5j;fw)rlJ&T-QDdB>Oj3k-5$OiZN`JbIHEQ;);4`I?v zAB!JGK6DG1B$t#Wl8NGS8oB#f2^c@;EW^cXVYnyz8(0St3Hp za>X}?6DE0S6EsquX4()Q#5kHr?okl1u1G?ntt=pK6Jm)Q=vkYy$27}gqIq%+N*9Sd zoXGCq(1gl~3Z!T>V>yZ#y2VLpBu=$FQKQYu=9kcVOcPlINB_!|9r#pskdTlFf%&Yf zL}oH&+Dkpmc|1cyuBH%t-Zu(VZv?v0DOIL7YY2sDK0y6mZ>oMy$rO}-2YYX*9-k(( zrQ(&+yE|_vED3ECgD|>&DU3UX$`}hF{c%eFs*)}1Q@N6dK?}Yh76-Dy?_7vK5U3h9y>04yCb9wQ1@K?_)0t5|wO28}yXRi_KSpsPw%@+HJh=$6PuVJc zEW7{T-R)QIe|GnK`|bVDb36@t4v3v2C|GwI(gYqvF>rABf(RXe2yMSBs5PQ;|sksMM!Ejk#Mg0<5Ojzu|F0$_mU3N?rOQnLmeBvbot-NE-`johqNV@O@vNb9 zZIku+vu{}Ej=KLh?tWM18lr>zyE`aIN0z)O3j$*f)@V6ktEHSK*$8$1D7HHfYCsBV z)xW(p^dn6ob}Nwf9Q3@;pSQp9%-bTVaU6s1nYH+qR1P?5Fke$@jjkt)3?XQrdhMv( zBt^Sr{5od16wNH0kSLFh&W#D;w&;DcEx~jV3|~Q`=qUGu!mp36!CBljuqvPBFTn(*axuj%ZA> zSxR0_<%SL?WPkwcun3;I&Cnq^6=t@wg6 zo+lwX*o8mo^dcXP=qJ?iJIw>~Or}fQhvW=qNX{eJP$HbBFo?kWk*L<8d?x#l0}|Hv z8lp`)Od1Sb$8nI;39`9avQ(hWp$hbR{PseYg-i34gj*1TPTN?jMzm7NxQM7r*Q}It zTIoh4@EY1xuv~CK4RpqQw8%AO<`!59`)2*gAL}rZ!K>9L*n!g`MYR&R`GN{qOKZ~zHz1-v&uq2gpg#V&Gc+i?cKM8 zrQVv?G>of?=t?IY{YPh|M&@Gk5TbfOMpS1jxhhnxwuy++3PD^pzbNEfA3%apHXTB+ z3dOFiOvSX=paxn&d`*Gs;KN^9ssV|W1+NC;ly1P**Jgrnw^aGt>pCNQ+x^>w=l?O0UT z$*xO|;cf^=oTe4VQJLzlq8d?J5oH5J;%OizYEeUN_e{UL^NTlzZ`C#H0I1k8kw2WN zh74&lI{Ud5jjw09nufr-RNPNTSy6iiJbYZ{ zFu1$(M6UFM11S^fXaerh`_L{*#$xx>wf)Wxs|V8mf*D!C8)S+9$Nt`nUWNYe_jgOx(jvzeIIWbl%#xc0+F;wtSVr59BAgG(;85`5eq3 z+or~evuos23V8i=EBRq7@`+Z*%gXAgG+Q=fEFdm>dKAfsBcn;I1^FRictJC@1ik1FQVRUDAr|+=PrDI`rf}NOWdr%a4_FmxYCvH7qwo^ zueEBoMd;N<-PlsAHmj|e7dEQPRGVyWu`q*P)Mim?ML=C$=%RLuVk2V|bxIt^nC~^u_0?#&xST?|b38CX)0=@-Be4Sc7r57rc9dj)RrNC8d zxQGRW?(PP)&lw(D-KBgWAgzlWD{!B=aZ=P1(;JcyAn5s<1Ef!62ERmxc3 zt_BTTQNt?N`n|=vylOm)qOV${i-rzswO3Uk8vkuad1ySk8@jQj7wQa^L#*SP{ux=z zq(&3V0DVD8wLzEGJIyt#PfI1109SGup95RT3UwKHu0d=gbZ&uFC#Fso?EsA5@M`4H zc&WM@#SOuJKV`jh0J*?_?R&eYdLDTHTOdWpOIvgeyv+Wmw^NP(da%3O#(#a5XE|?D 
zHS<}X+2P?Eti9KL@V#y+X@;2(?eC8^9G1$NVx2GK?r!6EX4TR@t3D5;|7kQ_Jq+w} z|KHuZ|8ML6^(;^Qg1s7sr*;77P+K?sA<3{g49@BmTQi@pcpUr=taTdgIBKiYg1Uo# zcgC5|@H~|Mv&h(DD01)!{(q2dL*uv>$1@jNC2wDBev*|}qFwgHFWYntSrO68!;VKRg7d78#V1x#0=sG6Hp zF+p|AK7D>dII1CyAhf3rV9XiI&{#GUShKPzbyA4SllYSg$GPvs=X};q%{_xe(swi_ z>LS3P_jjm4`)xlDJ^w}OJ3fGmWx?hCe>MGIzu)G6dzPoc|8G7I(BvWbI4{7~{m(K+ za!cLU1u&1*b)1jZYq1E+aST?7*WoG%8>1Scxq(6cjQT-;Kj`_rP7@>$)WpVP9H93b zihbPpP;wcnlp&uolF0gEqv99gMD&zRl|>OsWO<0WIurvqpx$rfg1}2tQ^oi8v=Q(F z-4nm|-hGPaY3ToAAMmC6zx}HJ-;2H7R{!@b&sV1Ztrz%H90QN{1b;;Dg_E|VrgPl1 zA1wVpiL3VSgjW9&&jacIa;D#TmiT}7cY3w^pBKG0{>QUC4*lOGKcn>~5})y$Y@yAB zWeej!Y#}$86Zv^z@DPypJn%d2YXfh@BYr-POJZYj;Esk zZM@>s`rlO)a$9zHud30#m+H3A#s0DvhyNj()<)Io18)=EfYNYPu)g@N;Borlh8WWe z`>R^DK7C;6Mue_-XotRq+!*P4Q7pR>ccQ#b_{Byq9B8X&TF(#SJw#Y!CATvKm@a^Jq{dE5mFmT|}$c zL-m-f#)Uz7NOu}CW6CdjzHDe26Jnw>$+EO#Fp#m3#RGJCbf(@gUJX!N%C%Wfv11A( zEFx#NkP0AjHm|jE;R-f0FtscNzL_5gsT6R?4pHxwdyKLnE)oFv#hRSH?7i&I0kN+@ zYz_lr#vC&o{RPKzck$CRO%F=%n&+}suTx(BWzWSYqAK?__WIgXzjz*Z{_7lIOW1&A z^50Hf{(G_C+J8RFvvTUSMT~;snp-Q*ua%Hy{uJKVPjEr?xlF|>0lO;g&X3>K`f)$! 
z^EmpiYdwzs@9gaESN*?w{k@j{KgTnd`46UbmeD-}iUkvdj1wjx8-Mj=&|G3GNq*&w zKg*}NaIIb>WUf-Nd`wh?vEM;ll6eg+;|#3DU14J(7mk!Kb(_m)X~Gi53v{V!&PyHL zwP>y+cCREA7Le(&KNoNnyYBYne~#y&=YJ+5DnfQc_{?AC46@Y!Z?EeAcd)nD`hPsj z)8PNt7z+G}T>ol5--`N{mecQzGx#;isoE*RHT?jEV5hUh6c65vNmj3lQ*bbNyodE_ zmQNNCWuGk6A7EpT9npL@~&KrU%IP8cWm>0=4~@9p=i`oEo>*8bypo;7ravyAXWpo}3kx9E04 z5;V+d3{l`x9A4uw5rMacE+frKT_dz?hx z8cN6*NDA6aIT_JUBto||o1p*N3edYGo*|Y%O$ic82}dzaNZ7grY<}gS)%?;O_MD zemCyk{q9P{{;kN$d?Ks6qVhaP-X0%0`)1_(p1t@q^rP$*HM;nD0$qbqwuy_1JRN^2 zGAV9%cMENI^GOQoWxLJ3jIO?P?vunHoG~Wz3Us&e3x15F-XgQlpw6%avkN3sl3%m4 z6a6Se!N4ROwvuszKxbkbD1+eMOQ4i#WtfR{PDUgj)g9ALj<|JSJ_$*J6IV0KMsX!| z(8`zckS1r(E?Xznl+qeuF*o0g$Yd?E6xg+2Z_fWfNIxI>6 z^a1DJS;niOR$w&fm9`gcOXBI6X5A}*8GW8_FPw_x1Ilp4+O5Til|LcYl!;(Y_=dpa z*^g;3#jv!RF&&P|i*!%>l%Up%bt`E*DZA(tF)%DJ;AnD2dOL50kenGK>C1GL^S~ep zX-f4Ja?dpncEuo{pZCvE)5o>MWQ7-*7eok;+giHSz_>4vCsZx!(u*bT5DJ zLpiyK7U{JNCUy$AK^|c4l418TqSbUa-1Dktft8lDEHDa9&nK?i>gZbog-NrMr2L7% zhic7nRdFhIzBOR7_0v|Ryz@`6CD+M$S0_nL6&V$sYcMjEXo--;NG)Ifu#(~`LU5b> zWXk@7^;0S66GbeI_k=s6!ZSOPKm3Db&n~(Yki8QczG9aUyDv5ZT%rH&g?W}o5t=B1 z%D&8is?@5CZ!uCzlR^*;?&Onh3Pg)HPn-+V|A*ahAffEI1G%`B1B^6b&TPWwVd3LG>ld)CgMQroYKkSs!VuA%^QL7gk z!C|HYr*S5(lp+>u`N<}+jPBj+G3-N$7Z_qu0&b;Z{X@@{f^9==ZW3V)8%@U+_!WaCM4iup}1wO zR>Iby2>J=wJC}SvQeCI^mF?7Qv7EkqP_pkdK3@^_TXMUI- zE$6mLG2S3TTiM|x&5cb?eG3P_8@oWca6g`;ScQgMdr01WmXJ6{>r}^ZgQJXo1fk%4 z;*4;w(wHa+F-R9rbvydiCDCXk7EO&z4(Ayc=NDC-sHHsJhxRnnVvNj(Zpx!#mJC`M zO8Fk8e#|hACUI$i%=^Hgw_AHrH;JM*cH0J*eDADtiX~qsBr1aOS6V6vh-z695H0_G zWSU31LCb@HHJ8<0Pd0pcH6R-80_vm|SPk4HUfm^_LeT%7QmI`PHxS!6saGlI#4U^x zk^mt4ld-oK#Pm!(j-EEHJay;(M-5wCmCXowR9;cWyy7q;iB37SB`xx z!G=?_an$gViUZlGcUHg44ZBuIWwo4<&qg10d1d6mIa-!boUvh)k2yz=fi3kt*b1@4 z=J(XO{0+xxA$$*6-ywdGP`pJ^;=vsr1|Yuk)sR;ne|q7s7H>Yj`6M<(1`9>fcO_Rd zpF!(yvd&4AH*&_>bBxHD@fgKpyg$E8-6$A67&hgkq(b|$2QCSnGh^e#M9X>JHsX3| zTF~OPzuYzO8cq|aQmZ0t77;9jWCj+??1$KsG+`7xo$1O3vfaUXiAG;q8xlsWsFs^@ zl4ETK>4;5V@FTNvHn?IS$8VRi;ddbi4w!cs!<`L0XEcX|8LyCMuMS$)%R-MtC6Yt?g7Z?>SbPtvDf7}$YN5qO(3U}VR4tT5Y~xSI 
z807>;9?jS2NCJxrRzp`Y+vz#0DEv?;&u|LSNQ<>}R7Do(e`ygm>bmz3tZ`ib)@v@2 z$c#aXqwK=qr*w3}m`fIM!b*E+o-6RPfgE%aP@{|j1)DD1^SHgLMQ4Siq`D+0Z>Ok4 zj8pzyO{Q|m`F^U*y@`k|G!;)IJ?oD&LRy*QGF>pwMvY%8y?i|x4~IG@%vyy!b-LoA zMITwSVLW)cvRKtgH)aoUiv$-SD#Y7%P zg+8CRSkE)9q3~tbCOsWatG>DN2GyY8HZnbV4bB4o+e1=I4+o5)G8LoLSm=I!p)^d& zu+w}wY<9joo@7>TA4l-P%oMk~$CW;10O&|%%UQE{J{nx#)G8%s zMtudBotA+pq0EV#k5W2W5qVulhb6r3`^&VPWlAY{2%={2(InIwEju6@b;sNNgXZr< zXRDQ%GMPHqI>MOGAeb7K;gs5l#67oO0=^`Aik@xMxc$x!;-bNylti1Mb~=+H7^{xK z7X08k?ktMvwl$v4l>8s7%|(-RVC9<{DGJ8p?h$sCTGs9HGB)^h6>5OsxzO{Ubw!R4 z$-i}+g2{yh0W9bcRB?VXrr%U(K1E`tpraNYCo1)dizZwav|PW?v@#~&-?T1RRa|b( z(@*G=>{-x8yoeGU1zO7^8v>QDj@ndotLp^ZhT(GX<}T~;!d*$ja6weJ$xg!Lg^VEw zoKB@;p2cm5C8U#5WEgQg%7}rbo^VoT2bl0g7^tDkv0flS5R|kJy63slJnwAGnp54ImJ?$(_AwH)c(AozCa)!YQTupy!*? z3b~Q%rM{xOMx|wJLg6gz%E8x>dds3$kwP-Zkal6|)H$#i`&`99EXc#t_3swb7Iu_a zQ;j8chT&~IyQ478oQlJK9A59;50E|$&yQbie>g=2@be1_`uTeMxCC$IbS~ds^@v6| zf}%b>ULV_H;MBSL@>CdMttCZ*YsBI+n^uJLsRzRqCTFTeu;IkI6iim(^m7e*1WpTE zueM&x9DDf${oj_8xTqbOI!`7?UuK^o9GOr-F0Zfmw|RA;J$&7MzAgn5uqf^XmJ!Ek zWs-tj0{oJeg4uc9o!rW0)g276()8vWi49^(ZSFx?_>|bGX^0A{Qz{CSz9^8y1%z%T zb)N{)XD6~LEy|I82o2F>%W7cexb(#uavEa#+l?WdOWju0h>bQE-AFUGF)G@p9O~XX z7T~-o)J)`DE*CP~Y6S42EZ_MW;f?){%5^5>HHN&7Dv>Z)<4@B8WUK$y)-N5TQ*1c| zWtZPdO|YP%rQ{;|X26|Hr$XzVa7xFKc9}7q__n$Pu^#A&NMef$G8)oFhIJ^wRAb7# z^p3^vYt~1A3bo=Wfq&vNbm~lp3U$AAcKopgu%?!J$#wK$9$;Lf zmpV@^T8Hi{=VDDD6NVtCl9FX99PYW_6!21+R3Vpe$qmmUhsn+*a&DuA z*Z}bMVK%$gz_(#en3F9-{)QBEH5~uh%lF_M{DnE%k=APa(BQP|p0z9T2i{Z$5#g)h zVa|7=hb}<@abFz6EEJiPP%d07gIrm$jkH9RLB-zqHoLVo1mp2Hynaape7k{RH2#YY zrXCZ`>~Qd2&$C2!$$ z{c(MMesFPfDhT6(Agr5EdLx&`)K=-ggVF^;=4bHT25e4dRiwUOGuUByRotWY0lp`ZW&+;9k)f|_+T zHHRAESfx7!l+$mSa|lj0j)?kp9U|a>8MK2GGC)M*yf`~#PiJl?o}X@vD{t*tJVd%K z|53oebZVnh=RvOEppQR$hCq}iW?U?)`AV8!JYp;|)#Xp?yx4e-5aIc;r3T~F#H)dYX{CMUM@~Mh zx`y%SF$=-k$b{=6kkSwTy8JtK`ml>2aGNumGyfta0RzrL9g>8tkSyAnb?PQPpQV$G zzasP2`a!BcKjyV9WV;ahJ@C<$=?!$3^k<)ox}UwnkVN)9(8Ke|_7T+SYybY`_OYbo 
zpNswFb}Kf`CodMhuSLXrMSx&R6XCy-Y^H(K(mKqZbs!1{qvl1{h?Ah`efy{4(!d5vlbJD&^QX9_o-IOm=N7M50%bx;m1Rci!BtAM6V)YyU$zwu^82Y73o^Af8@N=;!s-XpvP@H=$s7nsgym-1`3 z@nL(nml3u0!+jmmZD&q#@+5kl`KhNJ1QKcwe)b#T6+ABkL7}T*fNH2P0v-wH@(|$e z(9!U7D})^9q*Ct z@VLt0c|UEuRCs@{P>hs`67{Odml*@H7^0SaK{>R^1AfJ5U|-lK2XaQ>(L?a(vQ00(}(X zfLeQd=U>0zK-DN1lK;h7#jp4eXBA0_dY|W%9mm*69-?aSrA9NAKEIJH$tvi6{m4b3 z4Q8>Uk>1~YW=l^QQ{NCen%P$8&jDt4XcbGWbLLuJcRud9cYJ_F5Tg{8QZ8>Iqr4cu z`Tcc~je&WS7#CI%8v!Qj;%lx+rCb>IUw!iba8d`}!>N>oECKkYpMjGJBAm@P9Ce>< z`xVoFFy`+M?d|IcM1IM zArZOt(3LYG4onz&;3O}?(cn#nB-8p%Mtdhcl;sF&(+H*2*2ski@O@CkT0c%>RU!!p zG)f`(0_Gx3<$(?9T|uTLgEafk%pMv)YDF!JU^c?AQ#|a1wR&?J4sVc znq=IoJcH_tnZzq*z=YuxBg2a!wP~rQ8r~ByOj%N{mzKJiOT}QptJy7C8T{V%)*(^J z;1a8c7`!FO3evUzSv(%6WDpt-p9Ue|6Qj<=0c`xA8Km7Lm%%L4Io!ab1xua;eUGK;}=Ea)X$wY23=-9k#hQz5bnpGeg9Rer+;nh&xdjP`I?!9|l6_IO~7Su_WG z!x;-`#Enlmz?|Ia2X|9s?K^>;c6Q3SJuJ8{`kaPaZy}3wS(tv?d!{Yi>&ObT_LM1& zo$eTlOtPKGKD&06S*4KyKUG^+I?p+ZO+o$nH%5YB9-hgFX;s}AM+9A69jBKW+;;Yu zuPJ=1Ar@)oSP)q{{HI`=C=bu@IgyHiW1aJ^%xvaVJ;4A^_A0RmeoU0dbMlF33oI`L zQJ2Oiu)K#;D?!I?TeFxKVAPt$Td}KyM(k z&8pxE);@GN_lhz`olEe`%viiK0okffY%e#yz8FG*k{Zlq+Ta@8C^d{mUwT_x4S#^1 zUimIwK>ztsEEDpG@vAD!DoLVG(aEFLxCLx#4^x%7k_%TAt&Kp&Nj6o(Xl76>ycDfm z@|OpfLV4%%J7Nqt)5F$(#CaC)VVcO!NO%)Y&rI{gr_jMEsP5E2UP($hW`rsUh{AT} z8=+ZACBsG;%>$!R7kFPKbX|Dje;}7rbH?4gtRrgU*nL;QaDd)AE&ok0R~ElyRqn|y z+N!2oC}RV^)Q#bVT+J%bN(k%jPA(;B?zTXj>h!OiCQl{%J4tFfr75{wj?@DqYda|H z-%xT8lM+$GNEQNjbh11BOR{sUhar}R{7!>i9Rl&f6Z^7dw6vn!g{w%wUDJ6(;4 zgB!DW&U~8|Cr56R>Mt8D9fI-K85Ynx1fdp7i9`WXG>Oo{rCg!1 zJzAH)rt-deiqlu_N=~7w<|`sma0jV#1D@BKnfQOeS5dA7T($GaKd4EaoLM5Y+N5Ez z?v%d`WFJ7f%Dyf-J;)yG?@tx$e@A8%Q;#z z{CaHTQmP8@`oF12NMWy&S|b9z{ayUr?=K(Dl8VMXk8vrV(^}O}0~LG_bw4a_AR3`B zdu!Zed{sue=(6YJ3mdANYed+_n5?c5?6-=dn1*5z-COv|@8%VA7;y2Vkh*yK_sB^` z_ZouMH#L-fLak#tk#~Dx;dVl$lO4~79$`!f?ceX`C2-@ka>Y)56>jW{Ozl18h5sXa z@9iMT+;bB*-wQ~&^ZQsuXBc?P!9gXkoS$=0BV0igYO0b=t$@8(S$juI#y?)H4bKD`jd;v&^DOro@hXlX-uaWdrnykfs%!-V3iRu8bbW(?Z~v3V!$n~s~tP+{0-md+l( 
z;{9AMg@dU8<78WhfE`#rmRU$CP#g2p^6nU-MJ!Gcx6EPvj*73JuhXIj zfl_E5sC<3qPoU0^%1=knC!^jJP~N`xw~uXv3{d=@o>}O$G}v2J+BE@2Y2g!r%Lrh! zX}6mAZYZK*aOEv7RzE2;i9p5y+E1NEZuSk?J*VfUpkkMn@$BN?y*D>f6xx%Gpg9Y& zzVr7H?%B~RNsfGKpos?yj7tDoNcdSq1^y(cX$ol2rLYt!rh z#RCz($B$vWw-ESw)hn(?fNVZ?iX1nZE(Xayz)}pGpm+V1d&wH0AK>ldp%&om;iK2J z*^{#Nm>_t^TIsN4@iT%J1nS+4ex{O53=n525T z2svw3p&#Ry3O8loyer$=n@?!p;Kea>%Xt)>?ir&jk7Lc9(x7k6gQJ152Sv!>bdG=C z*-gUzg7?!mfhWEnV*prB`io;}S-RwgImLE6ZmZNyT?J4@Zp1Z6=cA_qt}YaG_2k?9 z)*En@|3y>Tw;lmqjDg(=Fmgc6M7%q`{$+Whj4y>9QQGY{=J&|GO9dR#o07%Ap4^_u z@x5KMjGp^XzK@xdtapdmDayJB8ctK3^wu=I<7pj%+W$j;@eK@Y{c3RDvL`$@Z139) zi?ql8AkyIHh#tiKPZzh3r_Lw!WJp$WtO)MoE=Q_2+duN+v~a6wZrsY?Gne|LcLj}< z0PB;=2_TW?v|?4h>*R)rRg)i`7Qt3=x}g@Bhl%%1eh5dQM1@t$JJ6ldxU=Kcs7Ne+ zvyHa1^wi zP!(&=*>20m?|LQxCP&%ft}OBgp%{Y+L;1T=VYd^`Znzo z=zlEShB%nwJRr}57=;!aBN0m@+>E|fQLnj!Ts4gUuWx~|-)iuvicVUp1_2dVl`O8Q zve#XB3RGE=I+Cfmt^(wtT;yoMYrxefg&t^*^Hm3Y!eU}5o^l&!Z#4cYdED+?yz2#- zv^InMQZ12lcIhefUh1n(Q-IPT;XH9<8tL(Lq;~hPM0q--EM(5HUoxmlNZ7*sF@5;p zdev?iK1E{fs>!Jdm@VhY3v`j zF^v1CB19}@#x5hdM*C;Y;4A1qfp_-*aVL&#Jrq1DYm;t?2@mJ)no>ItSNs%^M4i(C_`qf@8v())C4isA{F?3_4TQFf5g zbx6@gv1qtF6humauYL9{V>6kBzhE9n5=l4A{N}u6w)LOAvY8WSX8_>xl-^3Sy4wwr z`K~1|Aznebp2A59T2O0pLa!4}HO|G%WF3s}2dXjya-^}v2N(rxnHWtk^)~}M6abHi z@)!&8OaV&7vZDIodOu}6UplM|y9I?HXKm`b`Z#iLIAqK3fmznc~prCU;l(8;mV;lPa=OIyRY@S#`Dx&jhc0OQ3)fKOwtPo(6R^w{@)%FZ*R?`{%%!3THz=4t2-n91iJ3PgJn(|z5P}BMMF1qUvVn=AJK@*od{d>L zAWZsMVQ}fxyJDER(sgww@V~QuNMZtN^-i2`y1K+-5-;fUNakw8?=aCeQ#|rV(ok-j zX1m?W{agiE5NyrP+af&`lPWBNQ%YZ^#!_S_k_v2lflP{}50l7XKPmqehqS?p?g?GD zds-F}Z}?qqL!U@F=o?uMrHlAX1farWGypJ!(dK$G64}B=hKBm`g>78PBo^ zZr6{ua@GdK_jPkE6$tWQ>LAFJe^#DxE?i!`{dn32t-ak6`+uF-eIK5u`QAP_LDP-= zvM++;4K%Hc+q^#3sSJaJinFC0g8ryXQWDQc1leMET^NQ1?seQ3kOjT$-P-u+a=VRp(0~H3c5kuau@KB}OWntS-)HbXA zgs*NV0<+f7%i%V?FeC9{mI&lLUS+R#9&n>m418Q27+rF? 
zFP^sV7(Z>L#EZ9>$rvlK^i#0qnkKemHW5di`_fZ|+CHkFi1#>+;f6t@;^Oq|7+arW zRl`=|y{uY8ZK-R+lwV@6!NCgJVyH^Nn3I&}O3qr|0kUG8U#fw1Z;E3V{!yk-G0GJIYU34r0R?ckx6yy2`<1WZ zz%lg%r8Z)Mf2_-X@b@wg=mdL(W4#vXk=M&YNTDhZw&i;LZWJnHSuz@J3lA z{z^15KAy;-%=|j_-;NKK0O`w*%cC{Kw%XzTX=`c;RTqN z>;QD;M48YtGw>PpE`xYts#|DUWU#pgQ(HAEs+E?_?nV01z~$hNI%A7y+BPU@i&>0|GOQm}BR!7Qdsl83m-DA}N}CqxeC zNfCq?o>o0t%xV+TqK8;y+^7PCS-G0g)|QHx1}`Jj5B`pS#c)PNTPh}mBDF*%6^Krv zVp~a(GNl*R1LnRbL9Wr7+t&rIw}639)g(}`hku{x!2e9Jnr|7I-%b(TnTpwTyvlq3 zy`rpOy3}bJwqWJjm`!$oB)55y60gzNd$>@la@%TnUZ%G`pYxo;`=D%Hq?b_sCx{E!rM%lT5i(K>yHC|LS` z8{l_a)q2FQmtarlr<8u+a|JXt)T^wv9gzfor8qbJ5`WLtMaG5Evnz;faeUNzwo4O zs}7W5#o}S%C)=B_N8e~YnKtRJ(JE3J*p3y!%!W?zKuCnLtF~k>Lw(t#e6`bRENJty z^b%a2Qz#U^)H+zhdjWsq%fVbbVq%s@LXlKvQPdULgIf>wGGzj?;w=sz5kFGfZ#1j; z$y^+m^9sh7aizvkf@*9bj?;;gv?12U5bOx^^|e3KU9vD-ko_f1A%M`o$(e5@U4||C zk`@#Mv?zidhO%q3F%wz2dBJFf8)B(UjXRTbRs?ECf8X={m2^lmr#}xx zwT5~J{^MSRJyM<;b3}WL01DF1O9<*H9P|A{_;qiwD5`_vGi@y5h^*sC=0AkK^{ynM)-j_L6Mp6<1@ zauy@34V^Hou*rmZ{C@V!_OSd&Cu0+aBQPZY=vQ(5k&4unD2aW7b>?I|O`uWNz0bnV zz_V~%K^p;EKMfvlZKxkzZAL2 z6?-K^B*;B|CsV!6HtV|YE>zvkBU@x>Ul<^d+9h;yk%<#W7C;)hh@=u>4h@?s~T@qzW!NZIC$$`$3FPDaeeP^t?; zRI92srlYwzov2NQtF=JJMP~&199!K#@ZL4KOK1E2b93-;@pM(=Gz-Ba@$Px?_2`R2 z9Qc0sFXC?bkX$G7vA@&CA5(8x2094+o|46zLr^i)mK64}V@Rj54weApk1D5@$S%@^ z8m~OE2HIR5t{z*L%d5%uH1v(>oin<{ag)A9wI+ih7fIECxyDJ_$lOqAG0N!5&0>^@=#WKwV8tF(pE%s)o9KoMh zU6x}HHKTUY9H2~gP2?MdoL90(^LiCT*P+dXfb~-~bmngHoa#W+0eB>t+*JyhJ*g@` zn=ADmqr_c$++d8~wm*VxtxK}NZ3?Xxp&nbHMH&80(sxt^(=GMc&zz9_(gG$bX7B+L zUbY*=5C%%_h613%!2Uh?bpIpwC8xn-m;|>!Df)dPxgbw;UhL?gC55^|(<>Ho*RI6R zL7oP{uslU_`ks5g!AM}nDA-4Ln3i)OfPtCj5jypYX-bB4H?&|1x}UHxjIX7`39pov zzw9x7wk6GG)!n9A3V=NGdR&xrH(qt6V3?=Z{lm}IMUx3mgYzs8SXbx8__wAn5SfpO33VFe<8oTF#cW@$ zRu$aiSMiHETTVOl@$nHSH4m{vI4`^_!~<-j$2PoEr$i?Jf8j1>dYG%mxQnwB;W$FJ z-k^3x$(H6wvto9HI>pn70#ZetA9o7}0X&ut{*QYb%cWuDCR&f)F;czsH|`<~8XclG z6XhQmoELesP&z4I)hL^@yQ(JBKC$c&+S=Ieq*s&(YE*~D_M9W|?y&-Or&MGn=A~^; zJR(m=YX0sc3?^Dt(6niE<34RnOnM!wIz&#c42Oj@VKqHd*b4}7ESk1!SI{Z=*N>(D 
zDso}BLFL`;ArH&9?@S-vo0VUo+5rTh8|_>eG7X-h;HHzvm47E*7I(FoFaH{<5*otJ zYv-SwH{_KlhH*IU@50Ix4}D4M*E5!GUFU!aA(>y-n7O%lM09;p&&;5n3AB zZ)pN$oW9{l z25GW@>Yb5tubaFe?(!%lVKYrXiU_-i9p2e`*Qx71GPb=|?_s@Vv8WL*F|r&n@>Zgy zn1V_;#gGqk*F&U&>lOma4ea3apO`Pz0Hs&q7j{TTA zDRYLn3e?uCZXHm+VKv_65+`GDooq|&zW*na<6bgwIKk!ave74-s<4*WQjktIm2FOM zb_1qQOp}D1V*4@S#$8)!1yH3!eS7+C@5c-(i%O|!O+eQh09~A4s$cY?dOYd8U0Hh5 zCfA>z>v2KWa*IwI!^R9^l{XEeIXBC^#?rkYU^zumJTeKvA3sM=Q$`(mOt-y&Nzb(s zgnOMP#8zbmZVne>%+;-!f@DH5$(3J-5dyDmVE4a}*Oet<^ct-1zHadB>Wxjzc&$N( zmWlfJh6@>{RcQ8VQBxV&)U9=a9j?!syA{k&07iy(O4u6&&SBbmNU=1%%^9yl*EMLP zlAk{KIswzv4@`HzJ^l;aqyg0V2$9H6fwn#!I6#h9`HLX_>BjIWr&RPB@~jFPiB?Qt zzOjO-)TKR!@%r~`-n3pJ6OlcFjs3L9v6l-S?z>Y^=d}vn(u8e6)9OB9Qy6*l+AI5d=_>VH*r@9EF;(k?579)}4k8C*xP#z+s%3IS%4W#hc)`t(`2<=ts^xp~R+{cJC#N4$H8Ts7 zwpQMJ{+c3-lcZ1Its~DW>uU(AiOI-v0Yfr{N1>*oRA8~fHd!5#N8!JMO?eivlu{Ha zq#X?~((hLmVS}S^Aiu+f=NFU0kap-d*w3S9!uXN=V;c-$2l2x+D}>ybmZ&3|fa#67 z;DO`BXt6~)#90Z?HUOZF=ig+n+CD**E(st@)Hc(N*$pm%w})g;#{?5E;fm2CwWI&| zh6Q}2jmr!(m#&2Yp`jU;5rR9SgN2FRmII7oVCi42kRSwWf^4*x1 zaqxAs-vdE9l=?nbr*6E->soER!%0wmQNKH=0&{F%X5T`l; zgbllxM$%B{Ku_bn`F)a|$pVE;bX4*$g-_6S%B$fN0+|{cek_B;3(Mpsv8c4cNE#+J z2KqK}0Sm$WpyEwr5dU-1{Cr*vB`w+7s{hS}} zxzICBJbcQHRWxPrx`2f-iUuvlo_+S5(YNXp>>l~yu>sY;WWOw2*s&|9#As9szU!zE z(wr#e>Le1i!U%H|EjT7@L8=Uk6*6mUyqE`v{CNH-PRZ`c*u95r#foLBoVE!V>ZZ%5Z${q{Sl!kN@_-(g^Hk*^r3BPk#`hRGBs=x z>`$NHa%z9@%$qZIqNnJ~BO=9Be%mKhL$5_{(~|9mhA*03j;VJ=U4ZJ=JS(y$<1sF0Gm94+5aYvB~ z$u8RXVLN-_64w6M6UQzkUDAbUwY5zS0zSvunmmXz6?E6pavmhduc@>%af}|e8dJF& zPNzLnO7-t37kSNKRXX!CcSe7xEuI?MPP4Fv9PNC0yWMJF8Q&Xk_k!}XzOO#CZ^$J& zD4=^Dn?VeyEEyzw1NXA&S&k%Nh^H=BJ52tqR3|~sNPB5Xt35&c`>pz}(gokvwX1<~ zDw=Pi*l&s9Ak)Zb8z0g#)G()Hxv3=#RYUNgC}=lRxZ7CAx*59FI z+6(I$(s>G@loNuegK(;x=h#T2M6R1TYJ1}?D?UjW4M@_!0#-k^`=?@jK94m(AbQ0w zR)qImZ&flyZ!Xj|-6cqpGRUw2L~z^t^!CPg@fjFjHVN7~3jE?Sw7zVwzhBvZK5>G4 zvq0Vd{W1uDI{Fq=gwe;82+F$cy?WGr`NI1CpGN-A3H-@GpdhI0@n?|F=hM%vn4kZ# 
zbNRn67Fgmv_dgcYas3}Rz41@pS$zgVXMF`L2!8uo<;n280G`|5?V*wc`K2Zm+BT{-2lLMa({?T~Ow2FUkFXeb21@x6S;_d-?74DM$eRy-gVqfDZq^xAy;D@c;H86yKT@-`;z2L4sgl PAds2Dn>LsiH`xCHEMkx= literal 0 HcmV?d00001 diff --git a/assets/rancher-monitoring/rancher-monitoring-crd-14.5.100.tgz b/assets/rancher-monitoring/rancher-monitoring-crd-14.5.100.tgz new file mode 100755 index 0000000000000000000000000000000000000000..a66645b0d361a74e177e2d947a06a48001717173 GIT binary patch literal 117484 zcmZ^KV{k4(*KO?N#I|kQwr$(CZ96%^iE(1vw#^gUd2ZhK`>O8!bEj%*cK7V+nVO#N zy>_qF1Tj#kK>s-aR6sOFlFE!Gl5*^_UR-R(Eb5G=Dr{ETDqQRe>gw!r8n)I(_NHE{ zN)CLIX0~=fmtP;wB(2Sqv8N7v;L?Y?3M@V;siw1QJR9TkRM_kDcd_DR>m6mKxFf(B#c>vBpg3z(8>yw>bj^I)a|Xt`EI zSOhO5ST_Z-f0jb9g?JVRKNH4c6PH1wot56U5YBqgR6ey2@_XZ5s!pKTIclzMtwn* zn3F)bO1GXRXblUYCpWI@COKxl*I9$TDMfwe)V z=g2Zsj%m$Hyp}>1sSJuNK*0Z_OaPg=R9i$=oEG?Dz$OxzI!Ly{)HO4ZSUZs@F_s#f z$w(KL8Ks>Rdx5qHd&$tkA~%vq4)<#wIx_&tSSlFw26;r*ii?N1vy+F1w$#K|MwmP` z$VnY{)9)7_9_T7hs3kW)@!~9G9fSd2_ru$q-S``Wx&`1AniNZtZEn4+#Blme;PbO; z@cj+&xp%uf>G&-m>LX5%r?LBXoWUPuG{l*x=kx3zf3^Ri)v?$r^WOUv&d`X2FN~VEN z-fAQzVvUc=!I)2ViX&w(;_W5T9V+zZIY}cTJy_veAqKVzBNdsk59^Wwi$jYN&Ck?` ziY7-qg&sQb;6s9`B)R?^?E(H9ZTQ*j;D4nd-uil#g2NbEiy9QlOpK;{MzXrf8#oU| z>?i6zJDdjE$BUkYr7%X)=<{BeOWx^bO9w0>1d@c_*fIjMKsAPvWFG$K=KI{y@wG9a zUIv1K^|wemm;`L7Llnu9hs>Ih!nSC%2yO3N2dfSiu6mC+NJk%9WRaycF2cJG%41?nR$bUKsXrd zBTG`fWs8q!I3qIh`zwxoIP?z5Yx%@)D8lfyFTP^hAWsQ1M69+?rUpj!B(n|e&@6Sa zQR!5~gWhNW1eX~Su0+yUJHNqwX{s}~c-;1Ll=JxGrKlskN5^N`)LR2L#-}orpJ*(E z0pOem{66w$DB!l8#Osk~aKHuq>XqgKlW- z0^Tuk343KC?ENw?J12#_BMeb`Q-!mo7GFMXY-LKCcoy~^w68KIdD~32A zGm)BdBsXZ$vxAhr01gEbkFI+P>#f>EhB=b{r7VvS{axOr}+w(t8GCYU$eu=*af2u&+^WUC!8$g)z zAbe5>SYW)_=pODq>t7}p_`f?8Q}o#wFw?P^p3HXkewwVt-X?65+)oO)#h;44_*t3~GB?`OiA?W7uG zy8M2T_d5dUuX%{yQnSl99X#s1n;$_v@o=BwKd??FW1T0A@t;`=Ao~uDd@L`xZEboW&td9zH&@%XfvNY;I4 zij{XfRt3>z8~II|kZ}$NdQY<-JKD$gR_BaG-7&o&Wc@yy>_i(6^H?n!n|G{}t(Mdt zi-FPo@%24}9!f-`<7;UoDg3rSN`9pZ^dVY`2VIdV95BQwx7I84o&RSCCDDGRVW`_+ zf+>ECq4SsldG 
zd-e;Dpa^mMBh5L%xsl()yeN=#%JO8?^8pD)*Tq@xJOFU4B+gd6XgS}_gesjC+J*5_3Gj4xQVwu* zdhYO5+;Bz|6;X{J+u77TpNs~!bcjF%cZhvf@%gpOb;9z=H`@rNyq8T*#;1+zJj>4p zA}+=vjU_-F5%w=-p_AnP_y;g_Z4jB3T(++-IO@N#qKF`YKfNK7vDMTlJi|=e&I%|Y zO3nEsi1R{U1#Kd~-P}LEK5Z@vVLKe+*c|m{6fleh)FJ!|0gOUg&8B;G68MY?B4cIC zKZaTlp!1%)fHPsuV0WK;r8+tPyhcl;sGVt)M9;c*Av-JmAB_lH3AO6;9 zAl4Elam61wy{XdP@j+&ZVz0|`qc1Cc{%5F;ZyOm^U*{Pim<`3RP76Uy6hw8Lt^57- z8p>Pe@}(bQsOGamk~$jW`!Z~3af+7!)KA}oC<39b^>{!?DNLr?w?#3NHTW! zjgcw}@)eq9mgc|xw>{=QTk0x0ls^7241L|d_*LOQ!eaS<3-uN=T?&X4r1_J(%)GvW z*D{j{9H2Vls8BiMX9UJ(W(gErMCl#S=g}`7gm0}*;M7-~j;=51Y_xX!Gx#KCY zcAheso0Iq0;~uA1O%)tCI&Ql(GOf4`yUaq#+vkp`zDauGi#QS0U-xiqy|WL)OkDxd znU$v$Afwa1&Hl{%SlC7I{*WNh?I4&JX3x_WgUom~PTe45-e5z?i-8?Kw*E)n{MIZR zGi%PMgtQ3!Yw@+*oC*__ttRLx3!j=OLUhYH(RzS6VKy|5Fk1|I4N_POb3R1;fX`*t zFev#2MRLJ8ZTE%Rp;Lhr22||Q%({zl8m5N6=r8X{#P}3$0-v0H&?k$=26HU3nf*$s zQq=(KmaOtb8{WgupJkEzIo($!CTqd>g9HWq8($DAGPN9DQE;z!!|N{wy36<|$}xBPxxq0`%*k3tbFn!)e}OAqdD9uo#3hT9P^FW8 zQh-Th%1)Zo$wZPtbX@B}FDWxc!Az;Y*x5qU2_=nbHs3_eZpew}v<%DtHSW!$C0D5s1c3*fX7;5p|RYEQ~ z+q0}?XBBP|?#^_e_CsYx6HUwBLRd~RaAQ(p z>oL>$xQQ(C@|G~;&}z4PlI&}$ej#1%miaH>H9fX>s&7UA*4F=#8kixri%k$3lVte! 
zPc}1AvSPp)375-C0=oR!xBHttl#l9Il>fbTv(@Q%VWJ4EdAgIjv8#T&4-`kSg`=7t zba&%05f1h`XTOT5bzz*|FElaqq^sZiLCJ(hMeleyG{YlB3;Z4BRTzxYWT&bctvxDcVXQW82 zZBSOV{h%H^!1AF=++QfF>6 z2;I5c1MM0urp=!`b6Dd~s_V*YIQU!5x_r%CkOOw5Ha^FoR~TtU;xK|Bu9Xoy9$i_2 z&(5SY*sCOFzHyShw>E3p%%3H*+13G*MYx!5fuC3JpEuBQgq_TH=BS z-pz400iL-}$Vq4ZhIrLfsN_hMs~Kh|K11dS??kBoW&|z)Ry*1+GV&JB|K0Eww$6#H zXnr-Ax4%tk!nevCfl*a%QG^eZ1Y{Te>tTB2+(WE^Z}kds8zx{Uz6TeX#;=a+686*Q zF6}sC8@Uu7fd^kS;7^30xEHgr=dT0G0-l#=jX~H0$1;tkpHm2L=5c^#a^`UYINc)+ z5jbb%K*iSvRXOH`1C5$t8)z818U|kXOgWBG`g#qVmz8c_dz`+Mw&CDoL1cq-RAsTD z)|$2yOwV%G3R;n=gKYsLMcs7bkKbKSQ^bk4Gdj+((kwSFqDWQNMR-OijG`^eU{*Q$ z3>_{O>^`{KKlL#*$Z{m%(_f`y@KdxTPA?wm0jBm#XefW+L^uU6wbC*6thCkODzwWm zxm`ea1SOJ+90$S19K&x6P&eSbPG5Jer?%=EUG@Vhm z@E$Qq3>|5hZ*Bn@2D9AygHH>7bfJ3y;mlWqd;QH^Ws!oJ63bixakPzEV%qS4_~6G3 ztwQdcHZ?asnKaVjo#=fEc)~OwDdmto(8)Af@;z?3e}kX$R(JCia?K}`fk*CXE}x>a zdLT~BZ)y&XaBaX@TM4>uQXauEH@e$$!^Z=%k5HJ0l%p$m?I4Q|-^l4|6f?F+`rGdI%ACfQ5*g8^D&Gn-d^DK!-UfayawJJT zw9-WQy|1Bcy;XR_M%rU3nOB*Yn+a6o%JY$WkBEXM3{-6My|i{=KO0Emb83jS6?rWi zutyc8jq9N9XK7s$h@8}RXV6`df+~fIIQ>%{9jd7o_C`&#Wc0>#yUMV>&KU28LfWLuk(Lb=j9^2s&m+%qGMk+U zHG$lcVAPK||H|=)Vy!{`xekHUqm+8FI|Hwb=a%c@`c$*Y;z;h_`tgF zY&RV7n8m=zCk+t&i>7IvIqiN*Zq3cQ_SUgvpv~P4Bm?2ie~?`^hRwj#iPX~+Beq7V zS_Hdr=cR}j8z7-l~m8#AV#6rtvY*r<+b&Bah$0JrT*@I(uQu5EJaFE@Fyj6Y{ zX?0S4zS-djpZ$Gss(tb`yt44XWwKjq5~ZgH3>!*dkW2Ic+Y{K2!T1W|PuvrT^>KYK zClP$NeVh}lwDL!U&7_ar{;SHs{@209{jc6*tb{ofOBqG0iDY{JpwY7l3h~ehqQ&Hi z^)8OW(#|YY6?9N6E&<^WveWbhfBgQ_I?Mi32P>a0{s3)@yga{ecRxR+E#hTn8(S%B zOy%L?LtTbAY<-Y51HWEjjz&fMP1NLO7z!J(2I5&StQqp0r-}mVgRSVuzqK=aInya% zhaQVV{AkcO_-n8uHxX^*lR9G2uPtI7%0t;;9m+OAwO}300tFNj8r)sHVAMkTF*tiT zdaBKgPU{MYc0K#CT0PN%$I2Jup$jRF3Fz1J1AN*po9}$us1$gOahT0k~LmM1~WEA0KOu!8)uf^r^vQXUEmVeP!J9OvdPM0#d>Ve4Cie^YJA z^O9!nuR!Jskk5N5$U7%HVAp&d6I2?L)?~P2dP-7WWjd->N`7NdzX9d{IU+%c|5YfM zB5294CH$9fINO(7wag^KU5M{U1pGOXFde*sR&kOrT}>NtQoEHfy(Z6;kW~!USgVwe zzQHzSkN92?Sll#3?F`Yf3FsLV94E<9M%QXT{gr^av2>vLN90EdkgDA+%Oz_CL=}%imubt)^P@1pz*r{c%m#b 
zQK#8gucMwMh)Q2}AJk%?E|dqn!z7e(Wp~UVe*0^e1bxq0QcOne$afW8sf}vZ9v;d} zdNY-;#ft*$+n6~gtw=`86^DL8X2+wV>ed+SSiK_-1K-*>Pdx4+U(D`{ZAHgR_)_?aDp`!n-;Kr_cOUXD;;ow^!NH8qV0XUB!WI#MmQ{a@ zfDtcy<1Zuv%=xRYm+8WbEufZ*Y(OexpYlzZq`%o-8CP~{UKzDMIj2*> z3v^>nTvTh^SdpUH&^z#2tC(&iN-VVRA7Uk%a;V0|=F%e8-FjKXB2@NjF-_z4i%TeP)Ju0IM77JcxBkJkNuLn zf&GRh4z@(1O;@U*deE+=z#&1kTQ|pu#Mr{3B9WF*NP6qSk!PP(M53%i#G#s@>Vg5% zC4l@)xCW>vbYmp#R>YWk_vD~CRJtKz)s4ewF({&9A=;`|K{^3BYpkJ0GrCAKAzIi_ za}ryK2HMa__U50a?V}1{z~Nhm5)RTiLnGC*QQOUZyPl8@%|5{ou}7{~M!P;f-9d7X zCgwI5sT^zS5$7HsFhHFrw%E-k^10z|T)z2s2QK}t__rcn`DQ-u74>X~!T8Y@@GE21 zTblnFD^Tgy6UzJ&boUX^*vOx7K` zACS{V7PV)>jN--T0^M#V6`Y%Phl|UQQb-0GOZ_h?mqN2|GYlLNd)v_|N01ldi4o}& zCC{A;Qopi7Zj$VYNH(cguh-9HOe0-!uw=h>*+@^JR=8SMV@)zL?SgUHu*P?&agDDY zqdM)EQ4L&!vt2p%nRFoyJSLp$u|GqEW;-GchVij*_0|Ns@$rcPe1mg86%VIbE6BSF z@z=|cspCX#>t5=12YC5qa|&E&nPop!=K_9)ihE?*&jip7ZvAGZ{4+KD1S{H~G9fFD z5lf^Qk$P=Km}a}9%%>`)WgD44YU|KX)j~&x3a^9X*5{fUtdFP~o_{wEBV@M%%kxx1 z7d$1IXYhfco_ZgzT{*?yOrS#1a%uylfem1FK8o2>3CNsTf zSDrVr0+piFQ5S4%xYT>2TQG($c!ZMZu@-qr%M2D%AxBmk=-kUdb*3p3s!T&UTtkUh z@f8$lHWYddz0^VrA@<_<$S4wzLpgm;TM)fjoc8OWBqmF#P<>oHev-L~XHpSE z5IJNvuH}8XU`i{ZSS2#{wk@)-w;4CT;OChd^xVHaCEvTAq~DxhSD|qWP(~L+KFQe3 z+feDbDpBCNY)lcUKyl=F!a|)GH=6!whR zVrHRk+~NO-W9ruFr6(phQZ+A*sCXIEw%2aALn( zx@Cu~d&`kBZZJ(fI)gy77ELp!s@9_#VUvtDeBxVhih4iH9loWloSLrNCXbnS6GB{+ zEtRdXX|wT@+gz`tvB!7y+%~$6PqX9*vFHZtzB|*q9QPZNjltl-O4-zRaLvwK`0Gx2 zVa{#e&V=8{%hdCkWTaW;Vb5rO(Kv}GZfC&KK0I=Ln6cWmbmwfOH*K2TU=Mz(Ls_hD z-xjtn8i^C@0R7KGQ6S{~jp|Q9s3@ycIBN%&&O?8CWCD4r%UO2m_!BdfI8MgwnyLm? 
zYjVrDC2&-tPS)kvzCBOK{&-u&_4(0rLYcRAsb%|#l?qggGOxlk$p=3^>u3U0bFtkl zGXUQiAz&&+D?u?};1brHfC*oxmD1d*0B>14pp?8%j6eSsHC)yVI!{QJbBoBUwQ16b z9b$MS*E$kjVelL0eC$QBzJpQ&%FqY6qDDo^=JWWZf;M#*)Te*5F;9%)=v{jSL{BSIvydjJHdNPuXmyo& z>c>4HaV|{PU{Yu7sTsLC@q&gl4N8hEpZ-6XK}0#q8&--;yEReO_=*&?h)P?MTFEMb zrb2Y=32ms+8+Pe_u_RrHR%azx6Md*r(2{+?9C1>(wyq4_*)L4zpj=pJknZkx8yP}- z4|SYaa6#EYfh~=Ex_MJ`hPvIT^f_h=o94i4oYoE7QQK9#{IQ(xOoRL-(?1h=Ux@AM1F%BlP zBy?p{>y%uSU6WIfEbqWBl%I&{tbod}%K;D#il;j+99vsafyOh)^^!5J?Te(zwai}A z1!dnfYNsE?ut-=EqXzS=&Fm{Do{j)g*la=HlVW++D{pdGeK<1gE>PJVo7A;RYDpsb z7ZmR_mAjjU7oux*P*SA+T>_-?Wb zgMY6n{P3>}=ReuiFXY-*eVT8|e(yfE#lNfUqHf*Jd;v_>rNC+BRB6NELh}%eDf5g$ zt~3$8za<4lT&U**eW-^(YaNL288uA_O5r?yU4qA4ifEdO@EUGbKy)CVNrOUw>b37q z{&)<5hI{rQ{%UIs^Xz-m`0;S${S|*&6ZoAf3E*SG!kxXK$go(opcr-VK8iS;eE9qDV_be-crdgaES>xM zVG)g)YC*D4A{O+)dW(D)nXBNz!r0jq-1q=r!Q0oYtccGVV}A8 z`A91Tn8)pGkZlaDb0Yc5?$OIowzekzI&td?`@^oWYoC(IZh-CZfn+D38l;UkD?X0N zDT$5*jZ{yyX8Z~wvWj?xKNUNJEOhuW(%rt_g7SnuG3pu?2j+2ypaKu#gSdvnhza&Q zg1$ApDbF^SCa8hUhn~+PfdwTuA8erf1C4u2_UQ5dzau5M(^CcZ`rrp8S18W?L`q8gsE|(2 z($Yr+hxks@Nq?Ce&Fu2f({sc*sjYXgP8(~pF$;)BM@TpRHPHJ(Fix&Bq-@e2*x|pp zWL|K0)~<1(vK9Qqu@?;d25;sQZ9zAYxJ`YCKRAd`qw6j7v*KaKYOS}^*F+0Ep{Dg> zxUE|w4M!Rg{eni||5u}x?Xq5zDe zJ}r6W?U4>Q42Ew_JByU)ubZeNFv4O~{WX%-FRf)Vu(L3r?sOG+@&$=0tDh{~kk}6j z#R_W9hG$t&14PfN&#;i@#fR&^Zis~#Adl)Th=m*gYp6Nc{JnmZF0}v6ib5znyTKK1 zSLi&0>;5OB$=^$RZsYA23DifN{70uasgR-#(O|3f18fi{L5kL)a(+M#u(urDPt*VJ z1PAP$mhN5~@8s)ra+q2TCY`p+^=;qoO0_M2mEOW;!QTx-uC1$BvFaF}zPNIbUEAJf z!?)l8bA%q=;Q=&B9lRj1vv@l=QAZERM{T!X3xw@z`xj-zv;NQ@9ixDeo2`r|*R^KU zqCijb+6(vq{8{KU5u6bxO(_h#z&FbNk%0)&eRO47e6ovl2=>9wF#w5X+h8ScsR7mNd6kT!iOG?= zNJMh39P%T19~s!zY-(xau=#y!FJrb1f=iEFcL{HQxTYq&1iZa;DiW52BpgfGdL`UB z*@DTw-#T-pPdcxy(mo^MAygW~!(0VNDY^@AR|ps__y_8bqAKS7|<< zl4MhQ3ONSsX`O4#Ot^^5X`ln!Qpn0#HOAVp0of)+`)L=ofX}Q@3CJ9yEr=^?Q&6lY zThm1f>Ycl%t*zH3vGVN{nyhx(-RgSC z{$1}W{w`{Uctv{^RycyTmx{h%+4LN>pnA&bfGw726*>Lw9@%EZ%l{*pdX-2&N+!Qn z_!~@FS4DDvsKDc{8kzSt%Ktg|+a}q0u<+fid%>Ud@Jn*!E7h5OV9x{V#Y#*@@jnu_ 
z=UvX9oP+*IzYn<9tca)r^tS-aMEreB%-AAXZB>d)|mqP00jBH>Hgu7K zv_WMajxK3Fc8Ji>!dZ2nU2TKz?(bjU@>arp#H=Pi!hpjl?5$6X4g2y4o@dqkBk7pO z1*_pYSa5F&a5a!0MR%`b5we`Ggg%!X$ z?-rl1p3^xP<794ncL?th8$eufVPGHq5hn08Z!!@stSO6T>q?q)-bG2?r(&pR)`zrR zi>GU50ucdJgAEi(q7mRgAg}FSs)oN`nk`_v8uxRUC}AX1?`e%evks%iQ|Ol5)8#x+AcSr7c83tV6l#Vgu{`jC7VT zE#_m25zD#EJlYbR!{D4~OEweWJh>Y*I2BA-4zP-HXVfdOtVkHP%Y~(k%yua$T(L#+ z!zvTtKci`~Q{*%LzT2pk=c7tIAKsW$nQAZ>utU0TIyg{MD_$>*yhGvy4j>ha{tfgW zX028~y3#k3x#pkOgnS~5iDwQQF#H%61qEx^Q7E;iBPot;Wsi_P532^&*ykjyb0x*~ zn>6ZL??gT<|b@6;|W1HPPd5iFe*8gs>p0~D8)?T zDy3%`-HCoMDK zxo9_U&4Pr%gv$VK3Y@tLj>Cu-L!}y&;zcKb6aJE=Qw$WWs|YxfQ{8A#vn=QRC&aDsR8) z;=+RaH>K}Q>!LIX^9y`i`=^zKDqaYW}vEb^&*GSU#hUQ@wBq<*; zIKEoJWL_Iyxmg*xq}woazpl_M9HOHy;Z}HNrt{6U27t)cRtVO{f+mR_2eow3K|1v^ z`OGl-FPEVw;ihRtDeD$}6R!Qf2iYN?v2K!FilzhPZ!(#ykD7tp=vlwH2zM3Uy{r@;*FflMk0DK8QAYzF-lX06@d0~T z>NJp;iU5@Bk^mpKdQ<62rmv~)dchpyk;eORVwIMB)tjif61(0yDnl#u+N|HjwVaW9 zxg?QHob%|Hu4QsCx$0^x9&Ok_t#wplXV~pZUnbPGt+crZ9D4{MNM#?{-h*P~l5d*R zl~g7$Ffad@3uY`NX-mFIn8hV5Y)v*5Je4+D*bu;wF=&XD;OD-8Jh$_aFNuVpu zXk)`6}`CK^%sb(Do5M}dv?W`x3uP*AVpFNc_WH^t#joxp` z7_QZ4P2)gUw!-e|`@Nwt){dSfIpnE6jGB+fy4>Eg#9e9Q;LcYdVpp$L13`HPDntPF zE;t7n%+I)*pU;nItM>EtY=bzqG4THT{aRG^gG7^3W(X64YAaA!+Kg5=;FFkK2BgGs3Gm_?+T8Z~2NyPNTAIbmPH-WV-Dnyb~ zXXGIk=Np*%8!wH$QH%blmPNunM2kziPM*!UnMZ8fP!8kmH~5-!TB`X;=$COOr2YM_ zqj{{^gjpr#YzMdal`1$r-Fr{dK#bp%g{q&msu;dnVvk%w81hKHewbP(-#Qy z&h9XkcApUsIvpL(2=HI1yje+lpLQNNvA!f}y~h_~2!+=tXN@*Rt6sxJ_bUB#b?9#- zUizWlvG6C4F@XHeI&mc`@Cmf_r}rm)vX{+k_nJ+-7nh8Oify&G)+^COuB#C(^a(iT zuyrEx+3qdCK)GJ^*BIMHh68FgP^A}}MbSI87m0XLLxzp@4D1*-T=kTL6j~D3sA_+( z58DntBTmc(DhrJ;5pRp2a8N`;GrBR)Tr!^nLXu){%7 zsMZ%E`;mce1iRIza9xFDF7W?h*^yu=u)9XQi<~wmSRgq7mNO&RVLDLC8Rc&>5oL^` zYuc-;FXn4v(DQLwJt?b=zd)1K&C9flz*~4#BC) zf9t)$Mr!u1(BQOs4IJvw1k`O<3ZVFn4~yv^oY5*Y z&eYX)_i4|yCNSj)JBSWiiV-|&)Yj#_-ZlR%NE1J1sui`PnhFn{S|PLGZAfte)LI?1$Qd z@WNG?wpVt<9W`fK5%OQynr1<5b5T35@Ud6l>9u)|{|N?FT!5 z%AEq&2iH9x;leh4PrB}UopsT>f2l<1C-c|M=dQ%Ffu2phK*I#Jn=e|iQ^V%ImT$J_ 
zIwV_B=amSJOM<21<*CB01F-=`lxA0yBFnnfZ*JaSt!%edMb@h98KC;v=>YUuAf`%p zoJ4c}>Fkff6-S(0Z*P#Ns{cf>B^XtvcW=iJHr@~Q_d5-KUN6zEF19ex!VQhYd8q^2 zSi%b|oG{)p-?D_*M-A)+p!xxe)M)|AAJg8XU0jQ^It6m%O|0EzMYm!88{4-KRn75z zB2fO3*$~w2bIJA5DPBBu>ItmPJZV_(hw`4f+s1xpM*1ZQ9=HY$HVRe;p^Wr*zPhX> zHYg`RNEhTNn9|~;u#I!ncs_~ckP!jhht#+~NES8lg;4>v+ej;rVIBkw9nB7_Y+%a zd+(gOs#3%hWuKsT4`C0O>~*?rik;vJOt-o1StI+dP-UyeZ&JSqaA?fA_PvIDbo4$kV+`Z6s94KFQ(4xeZ(`}rmpf0 zS%~Y0#z^pEo1@mX9%1WdIs8puN}TO=Y9ei%l#>wftd+pXNnD`DjWAZ>)5>gg5d))= z$eEdApCE~;f<+;IGO!JY>E%FX${|F##83Yin2Y+*EdTp1unX-hi3_<@)lEh54H)I4 zA#$mw?4S6RPV#a>McLN``Bv*{)R$xl)v;4SN6j2Vx)C6&^Xy{rfNtcYsg696GzQXm zx}m-%EW1r!%(cF0bH98zyoCfIR{9#(b^G8zX^@NU=56ueVzSqui-)HdK}W#$E1}~l zNI}Q$gF}9(*0vO6RPsWN3LHJfUiihFrKaC`qI`F;BP@{vM0^Vt?3XFg`&K74yByBr zXX#!(Py4#_pcp0OQ4LFrlJot;`o&M%f9q!D88kk)o0{T_h)O`~eQ*|XWP_9ZqzjTy z;+T6ya!Y;1QOBravXM&sZ$zsquP?9JCF67KW9v#aQzC!2aMjutV^rm_)h*|D!dBq< zjqX)5Bpo`g)OX!)UgfpIPWHDlc81P-D>S?}-5v~XvuMJ%&iz~Ed2n-Ka=6m(Nqijk z)@|3xc8~AC^W$7yY!=^pt^}L83fNAs5f8>;lfH_nC<1cq44^rR+fO-JdeWo&_FDpf zDbPy0ec!(DkT~Lu*GsO@?Wa`mEj1ASpM7^XNjM{i_~Tli8OssoBC%0Z)R#DhS1{d= z=FJZVm)tJ*S?X;k-{d1mD+r62Kgz;m8=t0^Z2MED?f!PO)#{I^ZJLDYZWhN*29&#` z)eST$Ar+De_^FojWq>HC+A-EnMCAl%iT+CTyhz3bFa}fZq2z1%q2{O(sdMf2uk3%i z@Z`ehe`5}2WrrRDD%eDGUCC*WYj+J3M0V zS?J-KLMPr%>JFv>xt1JIKB?W-i{=!|-;4;Mobl;uSYe7B-t92E5c|8_1l4d$(SiH+J-Mm(;f*d6?`cIoG9 zEz0;rs7r6IQnn$UtiC;3Ltw zSTaRQ%#r(SSv>MkLrV#z5g4L)5Ov`f+AZk$d^FKri7wM`pBP)=rwu5B`4aY#?czx# zAxIA2GKsCyz3jkO&%^n_!ptlxJ8s$C-H3ErJso(Ty|>w|t_5oNmxjT6rV;&_N4N~* zp_3$UMmJLu1QQi?EQyti?NhG~-Yvl==o z_Du5FdHMt`a8{F$_Pc|iu8N$CF1M!Kt7XgB|0tvx{4JD?M}1R$m$8l}AHD66`zC$C z#g;%qX$82>QH~+rcRwnk9Y7{!2orVq1I{W-XFzwde#LIlR3~W5Q8pA9NdOn9?@#qt$0Q&#L}qAWdo;Gp=> zBG+@^En`;7xNE>%-64OHg&{>yN5$!*5ND0&@|wd3QH=eHseCud(!#1NpXRp4!2r8(3w` zp+@z$NMwCHptD?lrMb{uB9TR=H#uUDmYh!;`1`80bf(%AR z2mRmzTwB!SghMHFPg`gRB4`c3SO;tV4%3xA%k&PXpzX$Q8`RrSIimm&)_D{kszLA@ z08WXZg~YZ0y0i2E35MthL8WW;#&t%OG5crBLoYA!s>YAwhI5>JpE&7=u}yph!^qk_Hhtx@%)8@Xa1h3x&;wa-Bvv|7V4rdl!K~O 
zf}N5)7oo#+f6SE(+p)?wIxOaF9n1_e6NRiO!Ua#)uG4$hn zCXwxXaf4}f$V+ucjzuqjZ}nVHLqCPSmYrtQ>HFPT8%8kDzUJoQ6%YV?pT3~ICt9@P ze0F7je<)X}=(QFZab8_|Gf3lNIO0BTwx- zjfY*SBNNXCnL6nSP6c$Yn0j4)~W= z+K6jf`1m+xS>EJvthu2lZ0N3i&rqiPmTZh<`!1ne9OYcC(Vry!A^+X#uh$S{Y8{IT5btcN zG$WWukuZAWp)d&n_BFShnM0WlCk|)fj*#pu^QPDTfx<%`HKI!#Y^@V3;p0LMckRQ~ zWzw(-Mu*+K+C_iuEv5I;4I`A{6we>|tM3>(cdmv?GPZ-KZ2H2Iw;DR@aIYIW z|5zvJviAK)&z&l^|A)7Gh!Q2-+60{^ZQHhOowRM+wr$(CZQHhO+gaz{ziZOHdQg*^ z#TT(7hOt(B&wh6Vj#nel^&kCrtzzAOq+**}GH!iMwX@@_Uo_(4g84nxp`qWUShz78^xeAvs>slz_U^X+$NI0*in=y$$8a%? zv6ZovB=Jj+h)$eAQN9^Y?3teacDsIxM85vMkhP7o|%=F|`T@ zD_sLReN}eDkRH0WAQ`eTTCyktfJt9vgpV$C1RctH5Ui?xmo>_Zi&krJ|G9pYw6(Y> zmj49jO|@d|86p*2P{F7+rs6?QWVqBAXN z#}50_2z`@wp`4{st9-jgx#qS~wT#6|BE!#?WVeOs(>p(CZnesfRv~l?r(G1&wAlkD zB4=lS*j<}7Uwj4pe{fSZZQCX;&gmw81J%e$}EM z=`BQtg}!me5gr{~CEz**5t^3V_vgF5-Y@%LS>*8KjToyNP))k5PBc+>1+=;9IUT;z zP-D4Af*8#!kwO}d)$G{FAr;cqQUe=r`s%$u(p9=`Y7H^<91%Dg>g92GQOX7m+ZDc^q+X0t}2C=0}(v2vR zsneCg3d3mx^;h*-;TA4kOI44)wpmEcN0We&Dz0zYs|nI@X|MPFupiUE=X3B%>UvP^ z9utUkdGq^k`T>4hHI=-nq}t~PgaeEHtkRU~&QE^$F=8SnW1yI_HH773owCZNd5kMB zgf=S>;g$Jz2;d~er@1ffH06{Qzq7U7k~$&f&$TFduB?=2a5?aO30;q5$0T~Rrl~d- zvReRClX+)oIra9y8hNDYm?Vkf7*+2PZkWX!DkEM@=W&G#fH6c zEVBt78FFL(Jq_g5o>3(qet*Lt-Bc3}kO^MgGn1KGLZwQ;WKug#JYSf2<>=eq>2>T3 z;(FeFe`F+G{d=g=e7C#jEWMw8cVc<`d04AH{4z0;1)^JXx>h=65VKp0mS~4BdoGLF zDovBGSp65&L=wV->+WNivE`=@gtb`R5DtRbMDq#gPFW?wJ`)E}fgyN}@w|anGK!^E z=?)QG2e4lf-wgto&VP>tz6&@Ym4A{0nC{Q&zk@qOI~q3#>VDxnM75QGQ~-$o)b8cS zqjv#oOmqC_p8u5fC7z~P*6Shg{ObP(deP#h7rxQlC}eMkosqU67do(^OH}9R>0CNFFokX9%bZ8XAaT;tiZF!fdUpsIo|7c= zQw`*Rx1ic#P9f$d45hQnl@pF&FnD-}YTC-e*8TE&eyz-n)$Qfs()9&iS+Y67^YivrKnh z&3#@0yJtLgw!LtGPKZDUJsLM@46*BB0hGB+N8=HL)-ofbNw3YvDnUl5M^rps$P*FQ0@albN_%+hAQx## z@ze6RAQdczHWo?cH++JcD9cgXG?rL~8G7+QWRp=*q|L1TTx$0B1Xemlq|U8JRo-(- zqki=!z5c#7`-&lJlq7gs4epe-H2RfOLa4ep8;hBP#(1?qXj87(;jWcDvN10l%fhR) zEgIV-D`l^)V6M~?FrkA0; zbIGSaCAuQV7;TZ^? 
zmb=koM9f7)-?m2BfX&%jIk#i)RmA7Yp1x&e&s0V=dJ*+5aX~7|GnZm`a zL*q3`qh2}~1wDeGHPL(qz8rNyY~IxyRxg2^T1RYxz!5YU&()@AZd2Io@JTdqFJ2T$ zqZ1BrC2a!OfW8;dA3^Kvi(1ftF-hX$Bc6K4r0fJ}B$#wgi?{J%Ptz>tZ;cu~P_Y)w z@Tt@O=7^rMUyX`YPUfB`E;k2|ej3NRiAWm~?vSH@*5atVL@J~?RGLSEs9%1sX^1(d z=yCUS*1EN6S)l<<%}`I@g@{%xL5|?3A5y@Z?e=zv#mNmsRcm)C4C-ZIz`c`xmn$+4aQiQxpzCEvQRgF)z^pq2VA`xkOL{sv1|lnIH$)taiFcxea(<_?Vw&q*uIeiiLFHc%N6e=TIAd zfP+vm$=nj5n4m;DPQ;uTWU*KVS%&ejp3onisST(*g8{Q95KBT~)y@M;SZaA^R*{{Z zLA8A5#m0jL(6KP_V(}z;Bj-2aOBKJvXF0|>bYj0aN-)aUcmorSb*eZAem144q(hMk z(K6TB=CGGI!U{v|sn{D_<&erDjU-p0;}W4?&E9yA=A~ived%bCqhOjys6Ks*7XTr> zA=(b3P@66Rma!!Jt_OE0iFpRCD!J`c2KS{MN#cVBlCk?qvf_MSS!F&+h0pDSf@>8< z>msX?szsKK>XUD=my;T(oezPDnUIqEH4-Hd#w2JFqoF$y0P827OC9lQ_L3xoVBl91 zQ@B;XuRVZ8##4l9-~>AaAblc65v<;(RSP<}pEnYW8E9wG#dy>V(qUqxJ;OjM&!h`n zVh~XJuT?;wxA*C~8Jicwv>VmCRoS67iavJ?M>VP(W-Ev&RLf?HE?L_2FwIM$-MWS2 zWwn1hO!XDVt0trPpbj9ByqM9S$jNkULtZOxo-l`e6kE+*ilUIKTUrBtUebw1VuhKO z_aaftTt1Y=an5Rp7G-n9r)-YRvcqd)!0I*M)^>|Q_sqyc)A3akt3ApoV!;!$yG+x(#On^L~D&Yjis8PB@9 zzc}x_i;xNb(#^Wa`LLd1*sojVtX;;L(4PMp>$B?Do~kKbxiYw$OG-QcOOG%Q6TTgb zBKhS-zQKyPph{4_4OYZF38@MwZfmLcKD6TGIevcKSBWkT^khsHI|Th}ed~huZWtQ zF!32oV>I5PiBg!nk=^i5Uu4{(te8~-)#Oqkaf@ySUrDZ>75_Qy=v2=$KqwU7;=R>= zc?EGDU97ycStEE&ohg|wOjz?IhE_+7(XE*+71@-GXhc-Z>WK5~v_%h9ct4}A9J$Zl z?MhH~d#l%hy|3(ElmaeB41=a=gC-}DQpA?oOBo%?#PMc3TrO11)i&o{L10ohh0z43 zID-rNfljiueT#1}L^j}udwtM^kx7ZS@&4e2P+zGOWDM#pMRF7@k}4BWTYNRI=g;>& z$p)h?`>!mRag1=%2|+n@E9x>XY#>!-GtBfKRug;x+?AS>5 z;;ve$ZeC%GY6@eBoa(Gmd`VYb zSP|P2M7WcG-Rp%_!h#vcOof#tuIpwufjdG}k(l_bBzYG*U1vL?$9-r4;oRU2N?VV~D8o(0|Af zlvm*C(c!`-%C7m{zY~4TOi4ak+UV|j%T8?5ykVI$sQ>CQO%68Jf$E#06ZTW;yqcNN z9wLOL1rYzO0MNZgkRE+maWOF3bvlzGwBWCq;B**ciBd62Tqpk*j!Mo}3DPw52}FJZ zb`ajl&+Ldh3W?Mdwuz=a8%2oJoJq-}Iu|H~2{~&YOFV)kgpj_F;HiajWzc1&r~*J? 
z4UP$9k0Fzo05>$*tc((2SxupqMFetFk-@&+XkN!S%UV_&NK2>S7vk*X> zJM+Aj5?mQwc#g&yulguhwO1pNa6}3T+^&~dE*JWk?u^CH1H|^NFzJwjy zQNOmnrIkEfyL$eRQ{Dxb0vq+Rir_cUfT|J5-~^0TRd@-@M~VMM>Z%OJs*$m(Oh331 zpiC=~L^7>ZDNt5M=`W~yk$2@9 zJdD>4-1h0>WAhhlKCx1aW=S1S$|3hK6l^Qqq?<^pjF=Ex&dBXGg~P*3GaJ*|X2`?B zrf|wI(8ITPa-sY6nN<)9$#$8sP};wge>vLXKez1pk6VgbNC;$$B2A30NCC36Ek#*+ zoWik`g_#T|gNF;-@cyiu@(k>Sd8wSUif{)LVsh7j$#JnqXjU55Y?7X$nGpa3Kh3Rv4EAuC+ zNow87r@N0v96Cy}oC6S(Wee9mP>mzu4W^tMkR@#*CIR#3QeTME3P2|=^#V_t-JsrR zsWnd<+DMnO6;KsNr2h36T4#)!EPoFHBRlWZVf%6>I|WwgH{}bVu%5 z_rutRJnaojWWwMnXrD))zU<>qJmh%l#|cs%RlOlKKosGdFQ{sWb@|?JTelw2ZPvf|5AUcrL_3(b zDKb(S&bIgt4zEo?<#w^IWe1Jj+-uTa?<(O;6~Vj@@>KS`}~;`XFTrz2W6 zQf#bHeoeL52$((dR{Pr;b@E<@JqotGG6%=Fr0R1E)d!xMZSC7s&hjoeaQAFeO>DEq z=1IT26_*B)5Kxpo09hT-2CV`$Nq1P%k+c+Nw^79`OuEP>vjxXW=-&edgR~uW5>Y7J z`iF;oohi3Zr6S`hewj zAh!WI$%Qltv0&m!&M}hY`X9(M#+6JJ?HlRtN3LTR|9ei3l|25o_RgnSadBlGmFk}d za!Z$(u23u#B>uNbye=%?L63+zt+^p#G0DsJBgGMChJG(z7OK&}rWS#^-wO9If9lL+ zJzKukKw30H8~k-CHJ7pqX)@8O*^uV{jPa^Gh8d>Vcnnxsv`GnoY;fGnnEL~-$MnqA ze8x2Ks6?IlbX27sG}tv|AFbMlzsA5Up&rH&O?5UeuuhzZ7Y9DHZRL!XKXW+vmOQQ$ z?T@VMf$b$pt*qp1B*WEqM5>5%E+~iGSA7WPi`5$3`fc%o;clT$E3HWJQN-6^;#T8G z3{pBKZ_>2PE2dYv0K2!6x?DCQPj|(%3PzlWhP0@Gc$!J6UAb zqQoNE$=KSD(ZE77c=j^#3tE&?D>!vyex;yE%xV!beU#R@kyaZ%(%bNm$6E^_A}9(8 zf)YtffM={$DmJ*F_5z=wgzyOLeTue;HdBs1(C9yAS7lOL+kLx;i2N?)b-;%yEI2~zTOiNUU1(ANU#{zhkBr_wsz^WvfpS7# zf4OZuo=6fM?s#Tk*zh62LgZZ&DQI-2*h5>nF)Z@GWh7YO#rYc85|nr=O+d_PlEstm z*lgZ7`MTD-F^>n!=2oM1TDbpe8uw`fzdjs`l{V;XG0z+rH@%Bjenp?d4qeh;d$U zrr0>rBhjWE=+q5#?E&^>Y+oH`wJF&=*3<2q96Z)1+kR2o4D4YYq&|1jUM7WNb{9;~ z8xM^O$EBxYe*cy)5zV5>*ve(}j&j_-XqFmCMpXy9jJd8GYB`*sohu&6U>{`?xc`)DoPX0#FF5QRPH#*UEf@cQQ)0wpDcSXa@7$`9cz} zAZ3Lk*WzT$ z>K$I`#LW%WuI|eLvFPis2iiHE0YX#EN>wNOL6|WuO1@0NVRvZ`A~kaj%O|2w^5xwP z_+)35)L0vPb4d(M4UK^0V{r|24ayvxFb}TqUX*7C(C6!=Md!e3no`TjlNHHx18J(T z(@0RSdwZ(m7J7D`sQUB%iVa>wAjQk=o&LHf|7Z!C+2>GZQfDS&Yjm9M_CMu?TO0j$ zO*Dn`fyd}(!G$d%1+bt6EhG7?A^F#s!MyvJYq4u@j&#oz+4HAB7+xBxP9Ji~oecF{ 
z(mHs>MLGI9(N(w@cyALgCd8k`xXIzXL-FLsA$5Qt#1xJ|bX zpM`}1sEn5yK1xuJ{vU8rJ#pr0XvebZw!6{6J{H&JZx#I1T7F|JdxsyTaUoKXuCL+3 zLkq8#s*xSu`q0zp2g*?6)93vSlP-VVd&G+|v`7gUdtOO1RF=@82mPp)=^lP4?x$r7`=6! z03St|Wb^;FuzdSoefzo|vL?_)-R?e8+GTN!zUWBa%os5_dY(?OrK22R+Fcwk@dtpkE5*v_=6R?4@j$XmVa@Tb0p-$k0HkV2j*2 zwHV&%DEde;ryiqI0b)8&_E5SpG6hi!7yI}TQ(lpXJ{~C}5#yLc9o-%I#(1na;qOdr zfJCEx1D#shl2F)Gj*dxXg_mbpc4V%@%UvRcbiBEU>5m+?!(MeM(vxs2D?kjWqzJi5 zm${VqTSymgu)QG4y>Zdeq~hIHS9ja{`HS!FkCXlT&&S_i}mq`3eYw~_$@D2&+lK|#J7qt=lk1x<}~=l;lQGo`2ta&Wrs+kZD&Uw4?-X6 zN6w56%0OzNiIF2vWyBLCC?A+UKP5&3+;JALA5}wXYI76Eb1-Y_yY<`Q$b@tw0o7}aVGmM)_Qpv16JBVvUW zxnhSAGl-U)z%#aYDpbXddIZ~*hoLo#@w(S_>5Wu(Y1D-Rqn7Dh;e-R{_(uIKF|MuX z;_ZXgvpo2Bb?BxL)Td~!F#T5HpaZbYm=iEeXg*?rJ1y1zjd|V}W3QZVNie$`|ep;9HAw59DYjcDy0%+BFSRtb94YIh0331C-u`F z&LtC^9p3<=mtx(8YJ^tpN9Nvj(_Zu}8CdGG@5!(2-B>4q<+&F)G7%Gg{kO9(XmN-? zs7>O2r=Zap3Mc^ys#Kmf1Z~$~M4r~0f|SA#J%Wy3S}x*-N$OFw;hKdSbX)=aVb%E0 zY*4BO{$n=1J;b$R;qNOHPNPWeYX<55eq+rw2x231*UqijS>6L3-IjOO3 zYD`mSz1;z-;s=<21dD{@?<1~w?GAKSg?eSUZ%@dj5E?Cms!o%{GD;2V?-?4$8Z^Gv z(!4^lF*f_v^`>RcLrltj>&uo;=WMLrXChyx%V1r8$s5_32znPw zBg|@;y4i_k_iI!xV_eOgRynR08nazkEje`0c)%lIE7Q!^~q1;rp z^^GhxJ;WupV!3?mu+OrV~Yvqr63kY%Q zVZ8HYB~&3>)85fj0AqJZiSR>|UtCS88U>EOM14(P(UiYluAVvZ zki%dzv$ixgt!F|~q;MM(A1_iko`RWhiIQm3!~8HI{~@*<$a|y6r`1mgwa>~o%(86Q zYvh216BH+Xz_9019(#r((hz&#KMs&fm@|Ui)X7L}N`gNwz>roYDvoZqLgu|M7uA!UjwTu6`?1N+Qsi&@U@h>JKg*nd+`LRxI-s7-N5H?_^?I zV7vsobRQ~JAu$ppPu*Ibk<4RO`*N~n-34jXu@Ew~Xl&PMZn4Iq5ihImM7Ew4ynu4S zLc%-FH*4>Ns?bMV9fEXpkv?vR*`bC9w86@b?A6sj&P3Z_Av zZnTga%g936g8DU1o>b(tVa;VTVHE-pZ<=gzGZst3!J>+RNbZJXf?!8h<|6=-X$+la zli=GPeio!?+@VCduTm78hHN;RuPws3F1N|>`J0St_my19g}-XSxg=0uHGF2CaaveZ znN+~qfx8wsq7GdyT6a4r3f)#t8aWz*$|9r*jTGq)><9pO-xvc_6d8sR7>Z&?GuKxa zETnuJ5$5VA5HNt^4)KfwrADBH-v32p^yRnfdWZCOxr* zWYoIBK!^XP0et@nO|M7)y@QOoSi+EDpTiLOLuZzPLBxQ~FEaO8)Vs=QV z^Y$#)^sg5^l%+fyjM_}zjoiNouGxaaz_NqS!GpTZlEA#o`#Ay0;LqCn>X_iS)Tr*fLZ7)P5Mee zz)uUzUX^gk7y3-OoF)_`!!50386aeQ^R+@Pq>n 
zcOIYb3&pac&Sovx5^uaNi2?bJRFC$uzCX^Mw$q=KXR)1q)NRI5?x=S{S$zjKSJ?o0 z?ws@?%&b3Q@X9hSS*9+6{c)BX5alrxs*_1}LNa&bhC_1MH^q}yK()A$xNpAI@v5;P z!QRUCqbojxCr;x4r^8H?jmhROG%IioNcCRh#bf=4-9-tD;wFwJbz5sHy*%&x2Sjg- zvNS)7D-A)!5|=Cx5tk^O~bOV=a?*JS04g)sc)cB}y`EfbK+fF}!jy!!c^J;!9u7hRX zTP1N9jk}Q`tiw(>##l{qiJ|&pu?Vz`q%4Ss;!!pACz&&kq{0TQf)Rw;agQL|6V9R@ zw3NlJFR{kpM!JwtncKNgtGsVMFPX?kzvz)9?g8m@jAxL9a9YRm)m2RmUT8epJ&*8B zR&ZdOE0)J{RXg$loj7Th&8~i>rgm7sR;`r)iOzEJVB5vkX(sgutyhRr(kdO}RxuqR zwxnDJyy43Y@!=^lG%3>?o2L3EH&^0D{B`A+l0AjYiD;@!F|y2-4m_^!)zV9)OyVkF zgR?ef^`EEUvjo<_BX&d;b1`UUAhF3PCevKru0Y>(R_CaNK6XYDpO{ZdJQ^?oBf1Ka zQt~q|2F}PMuK$MDsyDeH2@NtRhc)F-Y;!?tc$cYX!|A(SYM-u%Wdw*g*o~#2*1|9y zzUnG7>PpU}M9FdNYbdloF!1kr zYQEP$ye5pJpkw>}E$d26@CTG;z%(RK$i$n6ZHZp&JsIp(VBp9|<~BYRKx6*2^+dG} z>+|~A@sx#P=M6rUHr*c!##L;lHl;-r8N>MZ7bdv^h-E9%qW4E3l7c!1&|nu#@Wh%c zQW14+up0gGh^ZR97ze6AF--D76M3vHB=YfU&V29C7MC0BoUzr$DS2`s6lQ%x2OPW1 z0e(s1!aeglAs5pi6A#*n^(^vw@bfQJ`0kW&Fv0ElR8$m?E$SiaRsp^`vMC6fsGPVg zi_7kUVpOOKcR>QJiD9IWb-`_<48d>!(2nQ&@31&Jk|W9symN?8Km?Ny8oM-vr!aYs9$j8o%n~^57Nu-1SB+yqB&P<7r=%~} ztE+WP$codQgWRZZc7*%E z1zx&?uKMr1Vt{&%#wM5>S~F*>EM?#Jy_`_zbetN?{LujkrKtPNgjDAm=k@tMu6$=8 z%v!;IOPHRBk|1&qKcjkR#EFG^mVKTb<~uFrOH2frWxSz}zWIg9NyU^AlPJgh8q>Dy zCTHo4x0=BF#I&I_&tdf}qVTP1)o#F0g};XV!|Rw)s#h03p8W?0sLihS zO6TS?_7;BESfzgy<)8+a7%UPUGKh}Kb1VAq7SQmN&(0c z$alaeQljI#fh=`-ZxzI`d-V63^$j&|Nm35F258ccS4y1Q@SggH#@i9$~7c=fu|#v%@p zi{;4D8%i@k)de(-%l*HU`Xk`9OU3u)UIG)JXOa}b0^8B=(Gg1w-95m|cWy1+_a8s^ zi%m^4=6A$g1*us#2~shXyfjiW4o`kJfE)SBK>uQ{s?mk7QS129BUd$({-Q#H3_GrY z-cL6xZTSa%Vmy(;`LU(tdk$7Rsf=pckBAjweNgFV2qnGui4_~KrM1j&DR2v&IOfhn%5dU*6UX!1NPc|Kh* z`vLnvj3PS|HLM^NfBmy4OY(F%#iH7o@r*Uu(W8wQWTX385dB38Nx!x<6vr051e0et zb^LRz%F@}YT0;RSB@low$#(2SJhPE*)2XQ3!dt)RCmz+h9=!fXQ8WN4z71~VN^_Xs zHPo3HpN~yhA+OK!bJXh9kPF_anH$C0+?N-AH>|&IpXH%ak8fv#=}+`qJz_SSvNY$W zSy>EIErT4^hGImySxYaC=fgf?MsW4liHaK{Zh0N%R=HVytOX(4Ma{A@s#x-aHW3?S zWTn?|0cS3mA)ZuqZ3a){!Mg*4Hd57!hK6QYlIlT1W|oV;&@W~yRGsrXhOQy4j3qc0 z{uzuD!Jkc1$ktG^Z=gMI>LyawdpU(y@TngwrDKb0|GPVc#LViEa 
z(`(s?@MD%k{O=yf>pn~S;WsXI;r5+}qA(;JzO$ZI7?5~l7C!h*y1^JPx36kq@MykH z2*ricNAW#v*mTS=9iL0FW!3)$q!g&ravBwy@wqx?{7$^}77jOiwbCk}HE{EWFvi64u#o$&2U|<{kxdadrUUta*w5NAKfAbU-~#6q5$^zfp=)ue3{oF>^+pGrZQ=hXEB|jt6qcAf#0C<9f2m7myEz>Rx}#4gs}`i#9kPf0_BT+iuxDsSfJ6%M z*2Co>5{!Gc_|;yMF5<=~uwx0{`_cm}G)1TjlQzyh)MK{2*OH!iCgE9!!i1MIg zwDxNuu<>lkli>C}_^MGN-Ho6Z0=2+HR6(3oe$299T*iSVPX~}k~jb6?&eeS=2HSrpp+!D+VG0Zp& z-HHTiw}x=MT)$^M8zbl2n1-|Me#a|sXjMvU+FNN0SI$6VTb;pn(7~!3xWkoMQcIXQU6NK8dOcGb+(i zI1zwwbJ;EvHPwo}{RJ>wA9+4s=Gbd}FIp0;cub;Ecd2=^5;A5y%@FXL<)C>3r_400 zTPFM-D138KXnO6ULJ;XD`bZXcqL#I9MT<))cL0POGn{TRgN|vN8Q1L4Ti2|}hz|000mjD`5sB+`9xhj8)a$SiA~lNq=5IPh-XCVx6y-XVD=TtZ zDGna{ho6^EG>#oxJAdc#vA?#!dD%R+d8!H$j-e;lH#3)qsNK%qF$j+Vsyj*=>_t2N zO}6Anv>oeBh|H=JWnz6yH#X^*HqSp1I13-uBqEfh0o?a99sa7D*1VCD@T@my4Ds7IrxFy;gf!})$QuBJwC1gEXY^rXgmm#i(45WD{v{`EWxCZ5iw)r**w z2m&K}B{)WgdQg{Csh#t65=YdR!NB#PB|mP@4gJ?0C-l**B|e_uC*S^1gqK{i!gce@ zW$9-u7$ z%}JFD*w+vm9aO)q}=P{h!g<`#nz&X3qabW-D4GHL#!&ow& zy*$Vn5G}{**S^RoDyP1-7Z17I2H`dw3)%++Jt)k{Is?Q8(&Cr{EBU?q0o}Vc&Om^O zrlhoEIdgHA%=;kfv@C&)(3ZB)X(|o6J(jlG14ZmZl-wqa7a!`9srcKFuuw4c=)W7F z7Qfm<<>>4EBiKt^nUx90V#ln*;prH^t7W$6S-;Yd6UCT`P@_etKPKKx`V1w&hWB6H zN0J!~z7mU^E{e3+167fg_mDtg!$#uc)gH+68jOpT$(l? z+Fc{TTF-E>1FN;2zX*mz!H$(et!afQ3%%-PZ~xF^zu=Gv!3w6<6XPtrT#)$UKEoRj zS3d@|P7Hw2cwaVV@LM#3gdA|UH6e4$HZTe+QFK9`eCQ+q>*0X8e8ByXm`15pwm5XV z4~#cIg8PLLE`ciCVQ_HIA`lrtNAemCeHmEtHWVfGfg6X^YVDzpm=J18{yF;Cad{F= z1w34J-71Pyge_nH{a3j%>(n4@$JvLLOw{jxSU;|pBKD`HED`0Mw-rsIkuaQ{QA{A_t>6g)`9gb+0P)X zyYuUbz^t8fSdHa1fN{6XpO3SHLFJ;WP&pS&#>dif22tnO0uZ z4d5#F$M>r*x8IQenZ^CtK8Dnls))K^C2phK4yOwxK6p#F4qkLMQBIt51)3@A`Z4Ft zhKwny`O#UZs_D9-`J4+${O~t57bMBK-2~uG?S_lLf&k#Omr&t3@KR8+$N|Xhg27g| z&V|~6i_|R2j72=!{?NRX)aoZjqd1dog#icM3y0}zHWZcG{(4F*230RBK5drxWu8;> z8Hyce%Q*(9LxlYq=Vt4`icx7=HcE$t5Py?Z_;XuFBKvfn_QfX!d3KUDU3Mbri@cX! 
zg%hS4uX+TmL*!MjM^Tz#T&Z}9-$GHNIn;^g(kv5D4eC&x(b2c;QeD~gfrcX*!-9*^ zt7=y<;Ci$x<*S$r^+)tLg~)YMD@y~J5UFw5THnRYR=B1qD82a^F!~q)1O$+FD5dQ= zU|T_0_s>)^%su#>bnk(|LPc;LRsFi}aZku*Rkxq0nXKZv+`H>O7VRLbkvnLfvzp@M zpehCi`~2!1zQ`&mD~Ug<@_-#MRB9vy5IPq#&MbI)JyWq!1-zTz{wFyK)P$z%8iP+B z!CaOs)I_*lQ@0nsOqV4sq}St8UFILneYdjdNw9TjGZ*T*6M%B_o`Qbn!E6}>{KPZA zaNId5VlC1I=YKuDVs=w^f7``4@bc*8BUOZAUwIqyVq0IY2qrW*d}85h1zHtZ6QR2l z+e`rt2Nvj(I5Zg-my)P5xyW%6Exo<8V)guUu-9@w%tu1h9#w@QE1{w)0Q>`jYUOAW&DIY$*p5H6k zj&g^Pzc+q=JzOo zc{vqBSATDE7-}7*g=})YyaAOto1GMSx6eSZk1}F&y*NCaC4_W*y~cwXWCX+ocou`;=B42=lz^t|Ug)N&#X?D=8S)PTe+O%E(YHj4fY*WY%nlFeCS%b~# z5_LI9>yqxVAMtjqsxd2ZsK zQvdO;DLb*4I4|NE?Bf*P-LUlcUAzHO6k!_0Fa$M8ZX)61T>;*0mI-^duE01+JqVoJ zF;|zrGfy#ITeOLiM9(w1JkL4Fhzj$3CDi4aQ$F{v z1>Wu}?dt}_D0!zjO6WH`_d0n#GF@}U-rV7(V~wv}?8b$cz9zj81Dhhs7SzLj5_^{0oS^Yye* zqh)?^8OP&Ai~1^f0&NwT`YbLvus`KHryegr6bH>Jdv-*PW~Z)(&-Ri^!kXEU|5TNz zzr0%kXW<+;sV}29Ny`yw17GCA` zf8=3;yBRF#CuskBBa|=gc=&_gQXT$Jc0oBtM(t>=?!V{&ZB20VU%i?YMMPC~UZ1ld z`j?e~D(~H6^(>6{oeziNK-s@{ltvwans5o0s96|U{3RTOMq?4a8hqA)q8*jHGL+KT zs?REqgEGMA+{;hkNTyA)$#oE-qDR2F+$z8dkJj5bEgUA5c&-*dYI1V9!?>F-Fib1G zgplTE42z|P&}^&#lc`z}*j-4Hl=B<+`JfSBFGrz~v3%ZbZobZtOv&vUDq?|kqyqNc z0nX3#hym@_=~E;`kqc>p>k)OLjS&yWEsq7GOLiLV_p7L!@z&R1z%!!Nhw_qbk%71k z9-}l{yaONaXvZCrIFA+V^!djLHTQM~=mEyLP>PlufraqVp2I!Q8N6kcQIyk(wv6#x znJywX$aFqk+is036zfp3J2}h0T4a=cJd)k#S_i27D zPy&dyccf{j4~k*+C#L#9XWC}w#~chz)m;ymvM#s@yEva`%)$XcuIOYn3eBPe$Q5o;)f;le9`#p8XXNN{s?7g#J%#kb z3s3L>B*m4h1Yg)pAhZ8&A$Q+he)k59sZ=1C9k|H|lTt`;atC67MI;f6LW%_suwpQ* zx?*^u9P9vy>9P+udmCVJ?zOe7;%$9>J$1w!XVy>bS>@eBrgv>AMumj^`K@sJjhv@! 
zd9G6W`V2nfcm;K>PE?Vj*GE2+OSY=f$W_lxh4cl2v;Nf)IS~>{Hrw!w(Is;{Z(O z49KHr)nFvbG$nrdzP^l?9b)&%^`$5HtMKt@P5X0P5>74_^dBN8SKr*)&z*iEVeFW= z&&g`r!z22SQZlnZMy3uVbCxD1?S*JQI1`@g8C7?}b zeG$*~vGNyFowxWVgmS(iX4`rOlZ%+yW&io3*_d^zjJUKVT;2DDCU4-E!+><4wS-Y@ zmG&eP6Bq8jkJ^3z@p@3@zW;dr^c5Ru%&glo)f(m#?`#+;@fON>U$)7vQt9KxTuTY{ zSg=G_@a@|N5yk*7w+uOR!#@r*{-Zsn`=t!uZLPyTAA{n2uj4;RPru)APVsOXLH{bDX2 zMp}$Y#YS9rtr`)y!bqfB3>vTK@V0LX4YOg>><#%|p&t-j2ksm}2z^E*wcOm)aieD$ z%27+k&ce-JC;Q~nte!y=o>p-_&tMs%gS!^RE;-%$w;k9BPA9Yg_z-|-r$R!8=6dfk zoj}td1IpNH@@EuFpN@wLePo?9F5N=;$fk%XUZZ8ArJf_A3KWTLdZUyqNcmaB5{u!F z;f-&nDYMehgWVQlX-qBlaQ-0VJmpJKVhNHddI9I_N=9T(h@Z}7c2GmY=mDK6=@~sPkC&z zUsuC39ER*HRr2dF9CnaPoUO3sst7qbm5X?SMbD-yvU@i`Nm5A{Ofy+UBqk!yEhoTt z1(a|}dpyr-=>m+HG| z_w?HG!G@yKJ@-Q2gj}_fvxv}QOutSS#@-IZn$b@U3bYA2>s}`&}W`(ie2v?+f z;_+7)fbsj^|Go|$diC{JUyaG7&3n(83gJr3EI1w%UW{gie&mN{z2pKZcVQ3U!geFjM_w!4wANh4Za%HivTAXWqTuF!G`q zK)YV2y@ZhWS@pj#i$1D1-MSHfU}aHV4|8T6_QASk)(|skVNrAn$)={-af{h+Fe@Hs zD&Jm{leZq~>y87jbIs(GChRmcp&cH;VQl9#Pcw%YuCm#hGE>Qcdx`eu|F#u79 zHIx{CqKg$+%fbo$Q&rD~^_o_w(EcBrP6Ry+s$(TeX`O{GT`Ms1B-&<8S46Uzv4A8d zKnP;icE*J44Jp%wbD3HLOe}fo7D8~US3%tcRRKVnDkTyQ+mb+@XwJcMStmAfPS?7K)FcSO*zT9n%@rgyL@PP8P6~`&LdiVF^U1=_J@uI@ZIoI0<4I-CZ(W^X+i8x3OF>QOUJ`8g%ojt@J^)*uXl2H;)xbMN;~dmLU7;n4zC+ zEMk;I;LM;d+u9(hc*q~9OfPO)0jTvQyLvvfI$SZ3!W=o~wdVjn`0);YygmEz%+qx` z!`cXRQKY<)5+VAV?PDKx%sQTO&9uO=RP{w*j=0GIi_xB(M^ILeUDeR403e3kBIvIS z9G`_q-yqco&2xdePMulyoil4*=PCR{6*OTmvb`dVaE1&^ds?mBF;BNpR@kb z1ky0r)Fs*K3D*?grv4O`&El&3bFecGxTAast~&ER0JzKYC0SD0k!mr#1}-BhUvd?j zRy!)mA6~;!{ng(YkYTxH-y2XrpcZmt(s2X$vADKczdnlk?2FAdh}TXz2k~+O+GdPb zsqvtyh>MaO-iI+SQJ$hxIt^;s1TNFZ4xgx3Ex~>{x zS%P4eorAWa)+v{}07w@s<>eyY3Gic>U)dd*U?HAah$i=F@_9kv`W^=HAFXBh08@PscUA;8;dn1nMi2nd$+bAL%;e7;C!W~cTHQ-OacEwrtk=aCqNcu-bK^I_vl?fz+6Jw zMMyvtJX3w>!EshWG!f6?rTPxS{BrB$9qiTsV(YAURf274zhbQ}F$KrJw`7#(5ZV~G zInU|?*OAEhhL^cL1l<^g&4aL1OpazHPlpY^7rEYUxytsc=V=n9n7@5n*T~=UiDcyM zoaQ;pVo8(+a47Adc3&25e{U24FO6y_D+iDaRX&Vw-x5vbjA>JVWuAV?^gGWLc(8ly 
z0E+d;A3qF;!)@x92O+ccfu{(5P~^lEdHQmKW*;7Sdvdm(CR#@qG?73XEjXT=rRfBU zJ|$Ni`AyAV=y0jB_?fMp7c{6aGmf3>4cJd7rFGi~JtvAU%1qOoiBe^&&Tf8AG4`(u z8GFP58n-_9$Ls3^&=&H<34UXt0OW{*JlN-c(LiQPKnAgdOsH8KimaUNma=MZIG_rR zVGL_;x%H!Ne?bH{cg@(Gloc#bfeWpq z-#yu%28{6;Emuz%0KOaG3N?)#v<@)U4vey238NEs1xTp|*vDF}4&iw(04?>@IGAZB zlKaTG>u>SdwOyLMxgL1ku`9FVJy7Iu3~dWCLs$wkPLY;r!pyXiWuCVSGiq(7nyP_B zMeAI6ifKeu0X52kX$tgWqd(cjJFh)ymTlLninhC+fhG0%(|29E>{q}4en2Frd3ddT zwsXK00?d|`^@;@cu0{6w-KyO?K4%H5681-vMIQh8?N$A?`k?*u+p8B#%{)t3MIoAU zb${Sx{^Y#O8-4Q9JS7KG=B-GXpKY#^y}Ox;bkIuXc#6YK=FMe|ocPaguf|CGwI0y# zh*sf==M>o!F@5aF-?5nv7KU*)$Tz_PRh+K^E;We)f*XD*$9FAJ4h7e<^ zc>?+BB}1Rhm|R}ne7t&lad&n3hPaY=XgH0@ov55VVG1Xgr_X)s%&I7wS4UGE^GK5G zoa+k9f^?Lb$SwB5yls6(Gma^VbH@rgXKSUw+`_}+Dx1_UtGGXijps^6KWlZAjk}&(V=L>ZJJ`D-*!~Pr-@|{a7bY@oLW;7ra5Ms^NMuo#}mXq+K zHeeQWvqHmmS0V0`(Gvbx)$;p~?{=>Mj^60czZHuG*JMuB+=sqYho*k^>{hRSEm8?? zYfXZoh>={R$A5N@uNn0Ib_^qFABv-mV$IieKkQ#DPmxb)0Ijxr`In$0t>s3KUOGGE7gCq_x5NiskkZi;rxdMw3kk zD`lvUoD;#h9PlZ~f>v`TRJMb6$zX=AEL$P*7R24*_L9z^B0^d>W+C~KXKZGQoAZ1Y z#QNEk*9c)`8}%|nL+KDs1LN?+cXuRbSX?uQ-4KJ;V^_U-NUT51El_nhv{wuJ*4vL9P1nbYE;)bmTO60u|h40T6; zVx5nKJMmyS;hJ4_*s~uU#$5asyjI~Gc$vK8 zN+VtgO9R?}1o)Zt_y)+fil1nj$~UOjU4XfSRasOCHQb5HiCz^v0aO3(?F~Gs$REDO zINRTS`Sq*LZhBs{^N04W5E@okm&!Ok!o7p9ZEo{V3z52%{XYo8oM*ikrt zcbWS*?QdSjwBO6lh5x;6h>nbAZ~ZNl=(anbu7O=2(5Mm2&Lue603_<=hufdtU3_!( z&a(f)v1dp@vjs*Ye&n}};oh#vV?E#>)^c&>juW?oHCxhf zhhQ47m|V8d%5AGfsp z_ha?Qn^ z^`$j$9MoFb?j`O0`{YYhx_w3*(^(Hf4hinNslBpxwrYRdD!JP;j_g}F#0sk3ezrV+z#Ztwb{ z<4Z-^+6c@lvlPulJ`1}dV7ay@o~wd&RcQzxUvDD0mS%*3rWFTul~E32)T%^Dy@d~B z5R4FdW33N4$$rH-$+j9kPnD;vx`vH>yrOcKwKQRyEN0a;m|T)X(*w8E_dpaJMHF0J z!zhERYdF^^iKY>*u3ttjv~{Tsnh^0^n|SqSN(-Ou<;bkA0Sw>a3*9I4)fIDOjMp#l z;5Zam^yrAmx7H%Hr^vjv-j2SVZ2jq|aTSphrKQ2m7HCl3OSM5a^J#1(yXecNu~jWz zLWYg%+Q#gO&$`jl=9iFvV>c0yEpl?S2;=#)b5z$>W+WM&v7@EwPn)}A=VlHy5XivH z9_tkxU;&VWIe#$cgX>qEA7owB1E=}$oFQ#e|J<{ORM)mE;uaZ2+JyP!xkajL>qT%- zzLDx0ka?uKw#h-V`Suo#{A77as_R8ErZ-)C$W>BZ!?KoC*S_|XZumU8rkZt3@QtYkwwe&uIMx0X2eaG-QJP-?jjJJ7=p)Ln-I 
zrNe>J#&tMQ!fVe5O4dNBPLp2NVUnfqzDN>L=K4Cw;@y0E(~(tQDVze<&id)I}cu;T0*EOzemQ^NS<5 zTxQm7qh8uy{_-m_7o}9=5Z3(~?xBV6j5bX&MwKS7Uy%jROQY1Ix-T$yzW!>!u!=Wy z;!T{*Q!6`q3S@(`mX4ht2=Yf>#x=Xgs;N+_xfY)|7k!jv5v~9Cmix6I^KSi%Ex-3x z(MFhW=sTj6@cCHxtNt{X7(4+Kx){HIBVOvERuv_GMdXEH>SXn zd%J7jAt9Y1A5OY^puFsDSf3Rn*^Dbah$9$Zs1F#$^BM9(F3L>vBCA^IU$B}#bkoql zH$gGR30Wn;rFG)VV!1||LunR}f=cQ|1v#YDsrC3!IHy7tX7stcPm#AC8kw$gy09D?LF%`)_tly8BVaU!i?qT; zdDj+~YCxR(_X-oebbhxXEfCoZT_h)Hsw|ALtCp>RmVFBrZtm_~SR1_4)sirb3Z#GLhVqO9lqsQ<7H8 zNxB~j#sgL0UZ?`)BzYT z%WNo|n3A`wnEPU$qg~HSSXT%UPx=)Mck`Yd?ZH5hlVPm5PucY^G#Ad3h~f#df7Wt- zhS<0|g`YbCuc?OXDVm!pONm%ADS65i@wy@0Y4Pkiq;y~t?7a5LCOF984-|@dZ&RVr zbiwl%qEaIG0*wvO`m0++wl%X=LNj*#U6iSPi_g}rZFe)EPwAl%`KuqaBo}GBr}CFW zjuVVMPO|Ppj@|x>BHC1D(h6AuAArFrS>G^cicXOWH-voY8AboVcF{44B2ws1={u2HqR75s8S-~l|x%&PFJUXQ&7LVR~c5!t=9$DAucnM z3fBRiyO!--P3-DHM3r|nYD#AZt@&#m^wV~Lvo%}>nT;;+6qWWU?#Vs7*1Bjv5p*j= z`xDTnt!>gyD0S10a_s;J?^zcwOw>XYWrk<=yEA?|Pa}S3r^KUMP;B);I(KB-3apt| zUw`%0=gz@6^9u|kH9*(6R!pt+o(J_oxKHTkFqp?b2;&PuL^9gE? 
zCpH;L+;HkQpz*6BF5`*a0e*M8=GgVO5B}UwOm=>7N>)xuRf~;xTRmLcr$Q1j37MLz zL40v@ZT3chs;8M6lZJKOxuC!nsBBxRHCq(AyCV-i$7W-tNcdjf|Vf6ceED=A1Z`93!~VApkh*?1yw8-9H+d3n`(iBHv{Js_i`rIw5I$+ON>Ro_mjbbVn2(rx&mv&^;#jnOu_TWau>k}fK@3L^?=MXGza_- zH&QIj=C@SjJ5VD+g8hk+rH!`RjO%$h87E>9m~rR!yi~8wCz+U>FQ}=hG3vDBlTr(* z&QrEz*}3Ag5tYfDYnJFzvU6JSQ6loC8Ehg~<3;*~Q9mP^ry~?b-C}Fk#dJw|W^AO7 z%qTh9pseeT9HD7u(@euTn0w!BsugZTF#g3yEiQO&guk*;qo+34X>g#>*kc>@=yqsz zIj{<9%$A;(eXQ0)=iD$R2gSS@F1(=3QzkPA6~zjtb`+!GIAP|Of=N?ZV=2K_D>aKI z^F?{Dx__*tHO)IMxm{1?A|Gic7d!`j58V~j6XZnC8JCthmMo}FtRoF5u!L;Ls=Iw( zmHDniGALm{EBD%lA~iDJvi{q56f8k=x^#B20=hrT_zG73{#x0*R*W_k%G&QZ7uRme z=4Ttm!uk_+N%yZYo7EnHV1Cp37OpV?`|k(F*|gY!gc~b zYPQvH$PrTb6euJN4sojv{&3_quNt!2fCPx-b%JbPMk?DCN z)6s(Fe9DxDKMIlhpoJA(WJA93>eZ`PUw`!#{QuRfS8Mf{`m0x8ef4i& zfBE(AzxtO~Uw--J@4oyO@@jZ6DHW6UKp3o& z!rL?>VQremd&^hjuf86?a!~4~XJBwuV{M!W$%Har`DuNG{pL5L(dah;3Ai!3*rR4S zh9Ntg_Uqr^oOxR+Efyc`RJ&wTp2Kwc&4Ou~QcYpTRcz-t-x8k%ax`Q4*cf1wl4mKC zP`LZFY1;U;nT2Nm&P9I57ffloDBh5~%(CBD?;Z(T(b&%47=>cKjj`sW`5`RM)sbp8 z0~iMWJt7T~4YbFNNZ(_ra5Ljd|8?z$cUx3uNtZ=XVjM0X=2yf`t33a!VYA;hVhmOin3!-*fq z&M>?YskPzF58#RgGn1rTC1T0s%9xwgc6S`Pr~I3kyfb@+KD!sD#e`+5eXV?pu+L?q>*7nqSNJKj`n)91v+(w3TbyFzK+ za`acL;y1V#v^OVa9{Fu*mW35X>Wa+awQ&728*xAvL8cbTjMx%48yl)jB98lDCm0yE zo%MfbGsIL!3h=MyFLxrTdR=8*z=sZRRUy*%X80w_cXQ9Mx$t>!m>(E$D1w9|1(n(_ zC)Jv_+2q#dZmgR1`uT;75|v0=Fxc~Yr3f(P;A95pyIh)H51d#FEVQX7`h55ks(5ly zw!2E}i^DhWag>r-P%8~C!Z1+=M?6^mTKH(AuCA;le>m!0{O1lp4zHho;Xb(ww6~y2 zJqQ^eHgUHBLUHtD*76C_C}taPu}v)@J7cY+oqbx1N# z-E;S>GYpLkL0uir+q>lx%bNM*e3`LKnK3ZBJ`i1W&~e}B+2ag$pF7Opwd*j04>P#s zTb(nw_mK&sl1ZSry^Rk!y7e<*VM#*)pJz+6gp+0tgTV#%GwhkvI8+chW2v$j0jDge zLl|MMJ>`(7UVYF!wcKUgm~j@a&~Vzg3k0)AQ9PfLh?)R{6!hn>7@}nIG?48*Bewgo z3hfG@cV^E;E+lvu;vmp}sh%gvTnJ@%^SJRjR_<|d;5lf|8m(r_|_t>@nJ%pJH>7_ zg8*w=t!=mZSl~wn9mMlloM_*NZ((X-(AKmlGKh3iNEAaRY>L`r##EQ7))V=)&KVVa z*MTT4?(bc`S>t5S~H&>}$H^$k)2(TCsfp>xJ*TP{j$OIr}A}5l~*dw~Q z))}adB`Y$TbTS7RB(%__WO&MmZeI_VsEgybulFJh z%ZJ>Tu5rztw(k&VYU*P!jlo4VLySw)c=fn$(}I8R3Dt{j0eq+1bZBVA-P)8=&H}cf 
zByb)DsBoj8t+8O<;xY`ot~I&zJftDdk(U-83~B^n+rhK;aKi|3)Jb+=osoOSY*=Y3 zMd1)=-Li~I0T#V~jmwLJc2>)&QER`vbEij@hQP?#>fo`S_M7@ChmwGW5|l`u8nGjC zvHncleA|6=M(=ssWoZUad)Ltv!9_H?iC9g0N5Ozbb@hI&LtSJy`ODqi4f+1#J8Tlu zYRZ2{i5O zG095Bmn__}+z8nksNY{>DIQnf%Q!3l1|I(Sj<6XRh-e=^t`I!^=F8Xr^WQAZh_X^u zmQ3JrMf6;XayGBGa_f4_f%|~tZMFbUV-VLZ;kM=au+KQ`GuE%eKI5>@IP5bH`;5ar zsH_;DATC>XF>-A9?LTE5Dr&~w-+Q~vPMV( z2QKymxTEk`90y9{J?+d;w^7@-?ya!GF0JdpnhvQgiO;v*#wmvjTuk=H0i<&A`IUcR z2gqB=YDi|~-h<_8DDIS6ITma}2IT3C$rT<+t=7k4>#inf$>%jip9~;I4paIwnbP}6 zmHxu#bJSyI>EF?SupvrdL;z#S!Gt)N5C?s8Fd^Eng9*`j9j5f>Fr^PB1YSH>EGZ?s zz30UbO!Daf(pQ^k$0AaV34uYq4ChU6C`p|B?%ApvEx%E*O`Q~2I;N|i%FsE)ynJRl z=n(U=bJQAwDh6>VLT-O8$P)C8r@-{VXpysWcgoVOhyo!j`>r~*=?lresXtA$q`3mj zOa<~^Bdomh<2x3+FON?ig$d$8#HZwTvtzetVO}($evqd@Po($0x<{m zcd!kJM)Y*omXNBG39R94zUz2GZOyU(vPT6plr{lQEwB8UWDEzv`YT9O;$rw60>)O- z&glg*kzfLg>JA*sU3|D~qkHW<+*~ISX#KEdQt@)Dg_Yu=@ENAl!NXVTk}bN2A?F6G zCj#omY3w_Q%%=LOAV#ojH+2gs1TESKkZrpuA>t(vT}3al8YS{QTOBK#v_#};&I=^a zu*5_TJ@5k<39bkn7uWe2`5?6U-_;{mrno#Mmx8GeLc^WiS!&_)(M_}f0yCU))JLEw z0*W4G(|EE<99)s>+$^59Ek13OvPHPXG#B}ZEegGAEW?UG$eIK?inB!_$2k0gbW8~B zNK2?R5DZbxW;`JaCTEZVCYkS;pJ@=0eHYkZmjJjCsWE=FeZIp~=&m2A!7gyh5TtOg z#5OgsU7A6+Eofmz;{P=&3de*Jy@IYsjY`L9?=Fip0y|3+VPuSdF`F_^F{G{_Fd8ZpHfWNaOzLE zy7@o9{Qd7=uk(Mte*JH+4*Z|balIh^&uWByrqrLu~&}`57d5sh7Nq9fo$su}Z zA)Ep*hB7Fr7T7xcCO31o26a6qvrJ5A=3%|=V_aeGsut6Y*F%adH;n(j(* za<@4G&6AUiCifHZNZ2FfRFaJuPJ$3E_rg!^g<7T)YebA!i){1gphF-y4EmgnwHe?X z-S}#gglaTScA(V)P|*p28;P;qQYn&nC1VFJpOTDCwRK_)^l7%osy5^s62Mkt)TbL@ zc!CwChy3{*s`Yc%&a-vz-Uag?1oE#}$qbq8Dpa^(6HjnN7&0c?4XZwAE=ORwr0Q4JUtiEg_c50azKgf{RkqM zC6#(_>zM9|V(r9;(cl)m(EhX4Z>g{?+fzK_8Z{Tnu26}{6;F}wxl$m_L#yAL9zj*z z*e-nBH_ugevWm%585!oGB8d#7|1_cWL)sA?PXDV8sE;%|1_6QN!s-VE)_+Jutq0skTF^8pYVY z8|bF4ja|g!z*db_dq&WeyP~GSO9o*L>m0nc3DMXqILloF!wmYtT8&kIVW6$88s87% zYHa!e%oSXbOWxqViC_Bm8*b7I{;F3mBF-$X9lfA1cMYSF466_!`2$!ia)}#YX7u6& 
zTOUCGZWp0T-Hmb^Jnx+yYlr-HQx*78Rn3k1G|mmIgmr4PP~P+}wpq{g7&G|MVpeS6~Q4+we#I}UEc@H)5+4{pQu>)V8YdiDAc|M48xi;DlKkFd`+3Z!brCyWKLzrxL0K#2XJDJsNQ9JN*$8&Vb5 zA0QHXIsjt*R%GQO_jTVer|j$X4XV|ze^VmAp?dy?jE&sJ_F4NY%KV4gy^RH=;foIO zZq{_3P!)tY-mJ@Evkva9S|vW#Z*FdwMqxQ_OpZ6kQ?1a88LO3-QjItE5rpGB0Tpz! zcC*OD%Ishc(Kd%@n?tnC*3mY{8{sxOP(Twlr%NuRyB|8}vQlX#+l1pHDk@T^Ku%#K z#g;NN%^J}>9ihEims`7=u2TLN8I$@w&FRdn04>hpH6_vYvh$$^;VRT|cF_i=v8pDa zRKtSnK2SC8`Y{M7f-PQAKt=$kVAdXl4)Bo`VplCc~r2-V32;dvq!1=Y}m*4EDi)Z5i3C?E4o zfK0HG4aZL1uJBew$8*ry_3Y8xDeQhR#C1Iq=q9OqB5JzsNQbUir6%M-hYMv*$JdYY z2I}~=bHKgmcu-UBXtG(!xBU7PT0Zi7FIL~LAK!t-ZxZpZNaeRT|Df}`r~g6cf9^W} z7H}kkwSDci{pfic#=oxMZ<}r(?Dzcq3lvyrxz|MaD^lPM?LKJk?x}px+@GoDp2A5; z!vL(u!cQyZbY>@LdaS^cnwFWF8;Qu3mXzni33+povWwP=Y*gkFHEGfG&XPYn)EQ(2_NW>2z`3wGFstu~j zw9>LEDyq4%T-?6hEC`XQmJnE*jDj^&;*3m6ZM3mbv&mc-HA_)^Q0|foj5HBBGh=mO zCa#^%LGg$>u3qDcEQHdPlw3@RDF$=O&l#CXh8}iRE=-{hjL+s;oe@4}W9TGClX+0= zf&r&bmU(3t^lS)uKJi{^>rp=Qb7Q@~8pU-+m`=v0XT&J~QX83=I!;!EYqO5yD&-Q* zGY9ro{vZNj-4MHzrsnA*Y}}DE9ZYc+vjy$5C@pJNNL)DK&2{wnzx$U?)M+KmIiJm) zEUCoBvfc+fz$@yI$Q(m3g5C}KK{L7Vt%0s`q!*^9d{JaP;o9~!c|}g3v$;N2Fz`mA zI31G;-BUCAoaIg}6-_<)D zMYGR9u3@oujZJZ2o_dpNlWwwtUyoI9gkW_Ih_&gI?aK(R!)r*-;1wBY*C2pMahQP{9GGz*Jkb-aV^q&*OjG@mi@di46$s{sLgzxy((_5A^SaoOsseRStzD9MgT z_7vID<`LS-gJ<=QcPiDakQOH#tPi;W)?Gq%&Dp^h%==YQ30X{}^9BZ)=FI3AZ&!l^ z4ndGm)zv~Q%vX7?oeDEmU``(7gpFrol9tR89w12b6l<$iO0xxc6@`TVnQChZhc;}< za$N#RDqHeI`}P4wlxy_g?n*GUK39e?41M&h%x%`CHYK9l6aEQ(_}#~+;1L2>n2^@# zwpon^XIKLZ1bR#t0UHE7b0-RsiP4*P+_eqinT{PqI}U)5o4io6bM)2`w;tWAftsrRMB+BCS}m?HayHQL-__jqHr z=M5}Gx)l(t5NQVv3|$zB7-b@%+J>~6SFBe}(FJ-{&d5JR&JYy@fLyU)7GPM2^hBMu z;7krUvICCnd2wWOs<2;iDQJh41vAP_-A|4xGaYX`4)FX{=Qt88beFUAre1SvB_j$w zO<76`CP&!Wf=s!kPF)*Ewx#<(ht;bykn*kaGuBkqr^AYY72KaWs-;1W_5AN8lPe;Y zEXd93Nek?$1y>}Y3e`}IX)$y^u#G2iqd}j5YreV$no*wv|9o`~-`eBtj`wf3#ok_LF7gAVk}v^1p787U#}+7A*PLgjH^bl$Qggcn5@--J4!e370NP)5NeUTZ9Gme zWCp9z&qIB{j;iDn>^rGO8a9hu5V>Qs&V!Iuo4FNQ#QoRDgK(@A7{mk%V^~xVWWWO{J8T^P^W$$P*^5dMrSeFb$wJ0;qi|X)1 
z;iob**{9YiTAw}UYc!d!4@4*r4qFuHG3$7wS!S_e&b~+QD&L>e>~cMSI%v=nqv$i! zHAZE3tPr>}Tr%fOobe^gt7YOuou0bg@m`m&Cs}^}80h3b#VkP3Aio4v;;R;*J$MHP zxznD-mG-P)yievn+a3_{g574%iHLYYSK0FeBVM$7?74vv2bb8!b-*ihTu+Uqi+6Wz z@9G-ZUk4Z0pv88;2#J0)-mUf7xwJlEa-?_{*1>)C>D^b6BOuOu66dnoi(4cD%#~}t zJ)kQbQEjL$x)eMmMX9x?6g(ii9I~q>zyrK13f|QwvjfKKfbr^T^a0~_z<70B2aMOo zb-;KvHta<^v33y60poSRc*VYPz<3=nUN6uAMh+OS1IFv%Bs*Zd4j8Z2>wxiU#CX-* zN}mqsHM|-JwATUcbwGR7uLIiafc83|z3SHi?X{~o4rs3f+H3tfpuKu4bwGO^&|WXV zxphE$9UNJ|UbI(Nipk9A^nAA2kcM3^NtvcRGuFV2O4y&f_lIVcg!dLSr!yv7CT4w; z5IldCr{LdEwT!|zO{QGh9N%7FjB+|! zetFb2>!N1dgi*=low#TDw>;}i0a$C(H}&^0{V=tTswvNy^>b-qnqzcaPH3s;EZ01- zurW~+#m-G>Pukm1f0wy;V>Iy`egoyamnuO?gX#ruG*xcr`V>^pZzP-Y$LI>-<36eE zU+(U1pcp@;BqK$s=8%GV_X-Nxd4NFk)pygLwIR}DKZ;$F>f#VmH_u{Y14Ab7l!&}b z5RaoO8}x|hdL-lsEAfVCS+=JSQOTSQ7~;3^cx?twx%6tX(@zs68qF6>l%4%mx4iIq z+;|jBjx?1srt6}#g<^fpz}jbpGlj|ar(=BT~wB}@(N3k&}y-!0w z3`H@()zY`7wN~+SPEEfDxpk+n?~z?oLi=jC4$Nua)|j>|_nX%c*IBINY>*#JK{Efr zGyE1_`;4cz-xEVS{A33kcTC88+xygP#d+$UyJxF|I{2ll13R{VZMq=((S-KxCdr0n zZ8jD5@ZK3P)^8T9AwpORnf7bJc>j z@F|DHIG{0Tp9hZ&8X@;=wGWXaLgEui{GP3DqgI=qm8PjZPiO{5t~AfT$_&z!v8&53 zbY&SNt061Ov#cgVR+9LTo&FUhdPz`BE}8rgF~(~0`d+~?A6bI_MScyTv>6Leq=h0{ z)F;3Z%%#pB;l#D|i9*P>Z%1Z+YlfG!SA0~5acunFi95-}!*@$2Gg=fppM8^w$pE~!yN|ejgsbvASfwo5l`En&&oaUu3nux3<(g*o zCy5dj1rNk>@f?szWs!&llG|jAs)j@PuGvNlj$^&;;IA%+o9o-)X%wbI->ndyYX&?* z;!8azruLSOh$abBs`?^;hBG$R2(>#48_N};N|g(oPO{96dBan$yOXR`no0N$L^`Eh zB@j1padT~FuQD|d3kqG3Ap+lQ<}4uXSUESEvvbS?i1#2`+{1Bj#t7Bqd`acGEc0_* z&(zxLU_QqaMw3L8xjr9?NSaW^lZ#T%BhX0SxMxxHfFrda2oQ0FAYm9r0)^L*C8Am3 zMo?N&r5=RrP{f;DRk&6-cpu7%EJV(=kd!cBUNeM>~H zxB{FlfT#U7O-OQmL#6^z`((1h&`un{u+<@XxtO#-Cwr>j1F(yM*2N04U(a?y>7Eco z7qzV8Thfkbq>=(Bf4U|&6Q|x)pTjPCVaUQXLe^{h9EapV@ zsm@ZLV-h~0Px~-wU+$#c-H-i3XKchneeknq1)fE8e>d3dNnvIObLC*J{K7Qf!CX0* zD~;D-(!ShD`>QrrdI4I`49$ud0z=?f`vhXe^l~rw>Um*T5z>r+T0JGm>TqcB3)10- zLyP9GbgJMOK-B)BP`zE%S@SOZkg3bY?Jn4moNz6vTzz|c0WSDEOzL#|z85>x{-)hF 
zmJ)cYaLl+{pytVhB3}Zqz0^-;6PkNSjr#;Lii-O%2j96#eGEZz%YU*BUo!ct6Zg=g;1$TJJp+*O_IgQgfPRK*cP|94N9J z&vWp~jYuukYVbXvM-E>)XF54A{9EVPxCJfL`5*niN2Y3j+8iLQ+(w-fxg7)O$7r8q zkKg(y9Vm_GMnZ5>=BWgO0x?ltYGG@hQE$d_^DmYLk6MtPr57r#VzU4&Oxi~CH2{Q} zD~~JW7!CRWXo$f@WAM32km;?OKxSxj7|6E z2OA*ZpcvZ<93QF38Ci1L%DoALDk-G|Q`>(DS8yuX8}f5CqJBOjKjT0eNtOwkGWjzS z82+s1G#6^5c*+tge;$+XmP|^X+5=nHZmx(Cv}DYw8Ih>p~pWm%gY-b0ERVW9W1656`Njqoc! zps~a;`5?4STwpsFrW&-VdCXsZxcm6OZ@# z75zD1Xn$;od%Kp_>T;DYf1q{7_PnuT#6AGQ50x#ntLJ8*7bJ9oG}Uq(cx~SPbbWdC z;qLm|>#L7@@z?;b?GTsEminfU%-B8^*KYGbSsx)b8^X%(R~ujZwJWVlHl<~THr3Hl zgXOsG!1z{*o&?2pj}<1sb`F`QzB;z~E^1TXE^x@PY*#p>@88q;nBP8v4I^K8J|>o7+zau{3vs*) z16UWJ&xX&~<7&!c#sfS03X#5XyLLquOjBd2jm@w*tN#g}vfv(2zN_E{XfJeYoI%Lq zI2S4V042encE!5DI6nJ>rn*$)xlsE0#{XG}bp0>Yqr}Nz#S+J)-6GF5PHmvZrSQ*h zjd(mVFUPwr>l&;9KDxnFkn=V|5q&u8tA z;Y`N;= zd0!i~x_`T3x-QbGWGvNf%VJ7i_h4l;CLV$X6Xz0DZHE!|D}BzlBT4ay^9O9`4$HUB zN;a#N>F-K{MqWm`XT7xc1>BzUyzO;wjoRuM3y4**NQ#{;Ev;qvgx>!NmK z^);&K+!bf00GbKxnIG!NtNQMAQ{cm*r{78&=@`cWM>v4 z)~szewSC~h;9c0iGwyNBl11-1Kjt>V`|izcArhS+$II05VD~2id1i$yoSc0A_;l2N z@u~?5sYiv>P!+%Z9aB??JA8_ExZXnoEH4j1%S9v&PhBcp-lq#D8C$qWNjmhy%R?+l zQhp$KIQ)t0el@hiVSf?rg_@8|x$aKnY3!8DVP>bNti4`uW6Hg+=WS%a&3-f^L`rm! z0fGV8Sn2WI{X*>kL`bGcXgYpxlrE;?PK7|QV8M#H(Ratj_zvDOm=iv;pp+)U09sxT z>W+0vqBYz^(!>gWP&y=xF^AvcurE1|K^KtS>UaUlz2|moGGJkbcA0>n9O-2s1-zoz zqSuA-!JRBf`gW^kOEG>PhdaTe2KjT9vHf++&Ys54)YxEaBfpabx(ZohrgYr7#h=Ao zXp3FF1eWmQ@dn{7%+PRy=ZI$M^oNu1C%G7RIa#jJw6Dwl<2S-kpC9s%APC(>KP?!Hlp=cvF}auL;)~ zzz|a1Nn@OH(`F=s-EZi4@3w@OM2T${-1OC-7m|)ZE)xwg40p^IDS1p4!|Otv}KP5 z^N#KYcVgWO)Shas(ihnVt+t_#l+?G{W zU*^obz1?ywesb8?p4H;q^xBRu(`4j`2=C6>$usyydv2aI3f7tvv>WHEHQej^!8T`- zr;Cg#M{>qAl2Yr$%K+Mnd(X!*1;KpVI%LqkrS}%>91l9?`|O2SXBLIQv%n{UWG-IXL^D=eJhuRkSTo&ENSz? 
zoS_|-tTiSHSH>=Ay!xH_zh`f6jDCZ#&i^OIz$uFVckTP#Hgii0|Nf;$x{}px@K`sn zJD7Zih0L7Q2g41W6eQY+oX2Vcv!7~Fi#WEb7Ih>D>sgWT{)Y-%#>7YP%VQDjnn-1% ziY{;aU_q71t*Hq{Ewy%GV&VQ^UAWAB@r_%O-dv$qlcEHjX>%fo^}BN~@YED81&Oxu zzSAIQH6Tf$Vec42imC)=V1sg&CIk?vBR{|Mj7btB2*#uS;&q#(i_!$yt3BobV*~c| z+0-cshmT*Ax5<>?Ssg7GbWdIRDc}Il@;lk0l}3O&Xp17dWB*sV@bD0C#OdHt46E4MpHlL{#0_c(oQT- z-sJAQjZ2r!^3t?!G4wDHTdq+#b^e%Dx=5l`YColKw2vAqi|e}V#!&xP6gZ;n_oD&I>JQ`>m|asVDfsq#m&lcg)0fuoGt!OPLk#)S z{w@0_P_|s2Bn+qUAo@l4uh3`k>f)g(CeAGPxP`bF1zb+X++c zQ#_R~S@Y`ZV(TjYF3yj`iVtyfB2>4cz(_X=@vtHYQO z-=_9cEL{+7I&`6;Lh7WF`*rF&I1%5Cb7?u_k=y*l5pYY9mvB|KE+5P< z%u+VPc7Fj+ZmeG8Ag;}?Ep$Q|=7Nap{umkSsyn^!=jDnfCB?o!Ijf2jO9AM?5>0V& z`YVG82h@DDI7d#0o&W)tEV8|{u0pP%k_u4r^8wFOgUa$G4sJA%N$%DC&$;Byh3qkP z`BOfl9;f&b=sUD0fnPx3(d_1IWQ|j4j zq8WL+(^ZsPmxxCYZ{F}bjm)fQ!*un{^9tjyG*E0=TTZs2%(;WNsVTzRpcv^%!6k&o zBZ}7fm1eHdXoPK|nu5=v(Sck7M4#T|0uKM*1t}d!~LGO&F_ZyQa907gqtQ?HZolVK&;KcFPvUTqmqOPHO#Qlw%zyEtWQgm~xLW zj(=?f(uXY=$k9V=v4@1ZtEN(!XEAtkpFa|Y0uZp2fdC{7MF1cvLp})CjG+Kz+w6ZA zZkY2y*yfA`AZNg8Wi<|vDsh8z#xpUdU4YCJR`9YqLj_H+J}GoZYX`YM=JP~ z%fe#518i|85i;9KUCGNqnFn8G9Lw*M^YR z`cQ76mbipXvXwj;p)ddy;MYV9Vy z+M=v^T}fxQ2ma{4p;`J4?RcTEPa;#oc(0YI)}V=uh}F@vbg5>FYRI_kXhns3T${N#|;)WSYtesh!ad7XsHlc3wx&HKqijiisNA^XPl3WBVc z-ioqPhvR+dEYTjV>ikpdz7wuqTvk|K$tOXl%tn|-kyw|j@nj7GRuqZ&FPs7Ap;ycH zAl9r#4by<#ietnBf7YU;)o#R2X$)!xx^mAY*V95!iLA(RBheSTk0C_{!b&D#F`rCQ z2TY$nxb5Y!1BSPdG=E8k9lAg&tf8A)Ot}vfb%|!qEkoINQt5Lbhh0v6kuIy&dpQR8 zHCj=bD)baaOC=+J(T4dY!)0`H{wZfHRb44_70_-mV~szAD)1f`H^ig^580kG7vM?1uUpr10!u-v<^M;z*_Bl|=!i?-F zkPdRX>1hH0{RpWGx_*X(@IQ9H(gm8a3E)1m%j*NGK~)A&bn9u@Zc{Eg*sW={r_Fxl59$!p@)>JKIwcBQ9M!f;3V1cC(pLomNkHM0Ov);NQmN43cX3WrOy;s^K&MQs z;8Mc^ajSs=Ov&^)(xTZGb!=g!aVi+h<)g26N>4$!t4=sWPP1fTR`Du?*1tK^KIjZ2 zd!R(94X@!45ttL(=~W}AsA8wIK%4#Qqg=dfr{2>lwcYREh~CV6fi!+g=8-b-%)-i6 zWt)wtSdW*)KX&dlX7+6q_;Z?z^5?yLK_w*jNdQC(f5$vhuK1V^a#tmKGd*^071xMf zRv=VoDHT$ZJ3UJc=y8S386+ZW+&Yx2@ zQvS(~Pp8OXkNS`@-(}1(SZofH<1+KCnNQ_cFOR@@otEM_uM?TBj@S%4`A!kA^KYkf 
z>jvDkOB{FqiMIHF%=+-nT)VbwOTX0M`E~nk17sd0v2TT>54pA@z&$H*S6-@#BK7(Nw()EFw!1_ zJaiGnWTeS=oGAmomJ9Z#t$F1U$a03ICI3hJj&eQ2_n?Edlh$bOO*1FQgB@OefhgjJ z+f3l|h2~DFJcJ6ig)UMAoSN`+5Mn}V5h7x)1jF%^5-o&Ez`*u)A>cS$L%^}>xU^0I zV?vvB9sU{Dt-WBdT3fsbo6o{9*S0pZplVN*+;vDvO!?acI0nXUlz7<)RW>i8fgSY*$7E8lj01 zLl<8mysA3U%pLO-t^hwPD)cV_s9gDM>h-24vLnifmI_~SIN87VIhv)Sn2ru86DxYm zng$x0U@hkuFqNUwZ5|fq!vc&|=2*vn@-)y=_VmhWa@lv05We*_GQ7?PGy83sJ*B`; zf3%jAvxl8y;|7&oZSn+5aMhqyYdGPmwhfxjhg{W|dQs#8se*|~HG6t2I!GMA=7~61 zT}frMsih(wW#{T@l$DUbJpfZRGFU8tM5rYC4l^StM!!J&`XBnww>gup);U}WF){4) z00}eMNvc{~$^}HC03gKAa6s?pi<=g|U96IuJIcI$_PD3A|6|Eufhw=10c1GZLN|jQG#=H2?nAsaT^8o6wyogzFmke%q~EGq1Lrr@`)c)zms=(AH#5iK_MfS80ay z6aTb{leE&Su}OqJtmvkw(1!;JtDk^e6p;WVN_Q>nBza3or?gz$Z@P*EmY6>4g9y#u z?%Ag9;44U-L^|k%9Jl%!A-81qHgj_uggcE)HXd;e1!ekz!eyV2e+t()#jIqC^5;45 zO(JCH-^bLjmJf8dcv5W5U!z~gb%S(9hgjWzjDCW&W!9jxO0WCEtS=vQjl03Nzec~o zlfc!dE$zC>zaTHmY!#HY5_@MJZXc_``MNU;3#_F7YG2$N1|7Jh&|G$~R=b%hmrF=Y zJ>HmLFTg)z$*u-?kCEfN`2(1xJdN6dF#AnjT_npAi@VnceJOS5=+*Q-)u1?b5c{A3 zD4$TAVhAYd#wN!n(es2!wyTz$>K?tZMUb4%1?Q!>uI6OKzw-@4Ujbz~pz zM&aB*B*lQn-CQd+9K-x;{ur*NkG22I0UFa3m$kPIq$fKC)n-m z1hLhwjj60l`Oc!ubF%gP5jg?vh+omyBL1M{vybcB((98N5;Wd?ez!x84Zr7=bEb=> zpVi*!^!u%c83p~D_XTmYjv~Yb!r8;4KYCUOs5>(@OEBY?8=n_jyOZ1db&ndL_Z17L zYr)Ruy@Uy+rBO!7)X-f*V;mYl>mc&uqfkp%y zX>vhhza^N5Gsuc1a&}2fu@VIarV1Pi;8*gNJ5Dl99(J&i{yw)r%i3qG!SHV533N{v zoUFcuR&X-=u5ef(uB6vgGdY}XNMT!4Vz08?yuPon{Fyvqbv?XlOLxrENdnEI=XSDIH>P;nq#FsR6{j~MSwP1D7_c} z`6GSGJBUk(Dy5s2N|WHJ2gLz6i~~QfV2@bMnu#-|aWN1lra(T$@Ha-qWk&31%`MdP zQ)TelJc{Xf_ffC8dRA5k^x#eVyVCU05q^lP08MP~3*l9?tb$`th99aJ84xdw+~ttME9J23y!yPGgG z_s-a8luEyQd~<0f!i7NqrAa0?Aus@aGWR$2(ypt+Jzc%uTy&qe_o#{rXH7@AlW3xn zpocC;4S7*iia{0NnN(-C-$uSM$3b0uO36s=EkUL{l@s+*_jb4%TgRUxs!GmjQk~2X#?+i?(+ty9 z8pW#d5^ecEKx1d_bBvtr zO8ZTBH&aikeb4#}{;#U`D2*F$A2Uf_yWQ%Zw+ zYc@J#XilRtx$H^C(*polPuT|rP*&NVy)04c)JqK)Wm~5Kw%g-h|LBZw!cIJm2(5QJ zjjn$sG@?ddqv0P8Jt0mVox$JXhehznDVulFvSy`VY8gHU&jtDk^x!9G(=8xUdVaKu 
zdyV0H+Mfny830&r68t-DF7=g3=zLu2j08;1$8y5&qh{5P5im=751vW~+=8=XbZCzJ zF~p0z=;(~T232Iz-4a{gkI8VUuVd$^X^x`_Mev2daRmyC{FqbBH;?`cw!hDu>qvK; zgn0Vms%oa?E3$J51|%eATo00wsBn!706;B7ML3>cz8xD%ok|-)Bir1G6tNC;+ilEL z+7)}{fS?yIzep-c3PL3X!#tdo=58Ixqc_J*CoSsKL83f-Mq5y_(0FAQPs(BHF@Uv) zw@otZ=pTwIdP0jrZKuY~hhK534EN7zG%kTyWt!XUonP&d2^>58aDwVMXtIqWNt=HE z^_7*go#Qzd1H?q7O#s~|Pi`!%bR$k3cI=dyqT|}DI`9N_k*V`Bb?T1IufkC=c?2?Z zB9$@`6xG>2E_}2&ZD0<(w>U#?&nv79F>zv{=QGX^2XRI(fnN-k{Ah|7Tb4s8O}6q} zYuH;(H4|{n15JszP<2K&p4Y)vU>JRYr=Sg}BtdMQS+2a`FdQ=OE|c3$aL@QcgHVd) z3p@?OLuOEUD|Zkfwp+5Jwatrr?R2y1fcE~iQf(oA5X383+b*S>$c7c&XlLlr+}^(C z+w=YA^Lo1P1NhQUZN>bc26?mBQOYk#-JZ3>%NjEccaL*sXdAquLSz;I+}4HtLuRgy zP~Hvn?Cg9lPW7@xQH@b0IopSapV!;l`TOJZ%8DHC5RP%l_5MY|Q<=id6<^i|HQY>0 zGpuo~GEYM+!>+a{{s3O*y`hHbvTySArN6gxyhy%)3(&Hi z#C34;!7}|tH{UMgD~^d{N*J%eLku!#N?DJScjknP%e^kvYc~4*N{(N|>G#5>#$tZmQR7Ic)k1N+{2F3%}bvKwJJy> z+nUFNg205u=^{GRyK(ipM%SvJ^UFa__3p36<)AJJ(WnlHv(r+#zbabri;8-Wg&QeF3uu_qbNz9~G;XaXvx2#Bjv z-lJPVDg)bhgRx|9T~-qjLEV<#iNKg#F|#uwnnF#S{e{|_Ij^l=F)jJQ--d&C};$JocjTl)6Z4U32)h2 zNUCwE1wYu#T-*@9G@FRKj@XHd&^ta@Fxo%&rEq3=f8mYb!Vkg)btP1f>37y2$4C|{ zEE$VAdet3b1d0r?TQCm9)`A?AV))M4%~vM=ysNh=>$OqW+B4IAjP}TEUxj-{j_u#| zR?a+S8Qdh$^h|s)=Y5qST(B}YIG8%>rSz}^&ScQc%CiXpr0J|KvMSCmg&N4ICX6lk zey72qAI@Vel@z_pdlBM!gu#71@@S@V&;`epS zgRnW^gJD~lMel17-20;yj=tz*ta9Y)MaA9T?NyC7B)0Dm^4nMBcESirPe@mGLN+Hxfx~ z0KIbzbW@Gg-1eGJyf~W=nP{8(q-xr>Ec3BhWtwoC+9v*&p4ONmdZl}FB}Wp@s?mi$ zZSm_OiPXluOj6(qf6%$dAu(Ay|M_NOD#9=Hn{Z+(YHjs@YG|mPKD{qCmGHw9_((~& z#J726VvfVIbr)tk*K-PeqTm+(c<+*+=iICYfnE2+#9DBt>?kTH_r=-&+TQEV+0&Qx z#A<+^b!lZ{wRZ9BQDQB^k8|BVtCoC|q<2?M6T&>Arb-p$3z2(@4q+aSaO z9l`kwG`X^{ z#H^yxFHR{w9BvF#xGlMNPt=>q|I?@-LvPZO0)ClNphJ4iA=q {&J+FnfkVuX(S zzj2_zx_oNQit0yAz1eH3;7vn?8+p*;BSuzk_1zD;k-^2(H7|^G-dfp`qrRV%u@b-V z0&!s>zP-RjdFu-U&h>=FlMN>r*L+!JLRnZqS*2rHl%YkdSiwxujnnak%l3u)@`da4 zhgCGce21PZ^M?;nko@W2wCk*`*l(G8`*cCS-}uus`pT*UY%2=RyY8#|LzD-0pv52WYYhupCY!$2H|{d2E2rF__PHB<`r z#4ewG{O_)b(#qgo?CmYrm745MaC1{N^*Ke$+S|o6RfR|CGj<1J6&b;^nNw0D@}$dE 
zBRk&~?SAG=4Zp$DPjnk8-vQ2_b2*toXEN!=#=!lrbLR39Df9mq`iov62QRqZzlr?%OLMN)H+0HA0MlT>mYk_dI`=on z+ssdB9KO@1ABEWY&*>Booj*4fHTG)IUu6aXIt;`qjT*?zI+Tvk99q^zVv6$VYOPV{ z3Xhg(?m|qog0c(U zP6w0ZENkA30YBI4npD)gft2i;&Mf&LM~gInOS1kkn{q(S#7cALwCM>DaLJ%J%NwfZ zn5e4)CB7Z=KGdl!OyT3kf|_RE+hO-9WL2VX4J3agV1M>o*;e4q#usSLtDP?-H$_sJ(h! zVl?OC7GBVnl4&k+ZsTcfsKAs-QVGI~s*eS>t}(gK0`XFt*%WR?9ssjLy9bKgJqwlG z+|}30725V3KRI%#wITFxXz95;6A%C=2-z>NBv)Awnu*iVB(pFj(0cl1Z~29Go0AkQTU?zPpY)X(Xj(xek?=`#%T3QhV(Y9KTF(+Vekv5b*UkK>sUZUFu5# zp*D-~L*zu%5~Cd|g+Q2a@Eoa|OPhL_pJ;nFdR96F^_`J556-C~F8PA6uxCyyL_S{JRm~@eRD${v@?^u$-&e+2)99OOlt%GdsqVo? zPLISFDuR)>?r3!&QN0J{21>bO&=gzQixCPVU=Ds&BajWshWCYZTBrYq-Y2Sw&&ZxPGV3@3C6w1@iAo)s$e36iUFoVZjE zFUaKd`2F$drJtT34_`Nj-)0Z!#swQ`Q@(QUJ==V*8Fn0YAcw?ap~^VWX^|A*TOZVK z=hj@~xqNDJjZ24`G6p@y=NSyWdFqDd&Lb&Um1Kn|6brBKpcYc@1wvV&S#(a7_n1=t z*`L8Kt+Y&=UhTdZiS-^SqeLBa2BWQznX_!c{3p|Kcx~|^XEI4mIeiJxdM?D{LY^LR9>JRM%xf7yco{i8~b1m=9scil^z(? zd=k3DgqsF-V}||l_$tp9F(}oQGSHxM+Web)o6HtVwWXR2PF=j{O_5*ZUm!J&qUNtm zDm?I6nB5SKsWJk&9-5l;mFg4eW{&Sk2YWlY z-pY$+L7odcr1A_w)ETRV(DNs%Gnw)L3it+^XaR6?%P0jWvoFEb?=&WDuPul)uC z+s8G(&0HNJ`;z0r1_i7kb=*zldvu4^vhH$q;T%i>3&TRk#{9g39aU=QJ~<(I)@pGNAo2A;J{;+J{tHX zrFg$n+%l%2o8NI$jP-v;yk7>075cJl_!0Jhp19ynV#C1(d{T1(p|d_4eY_4ew!nzz|0JUQ}EbfXK~{| zR_us1H3SslYUTkzI$gE%3=GbfB?zm;iLUS1xxcBz*{%KfqUSt5Y|~p*M56^p?(-sR zVy?ILa5WKU7aPa+H4?kh)Hx>Kx=y?LK;lJx3lzO4siHo`lE@WO^;ae}V`ehaPYl5J zfjB@-Sz)ZDtttU!@Ah(-8K1Y+7fmH9R49N%s4VstJ=H%_J6HYk73KHCoIzLZ0=}4# z5Nc+ah>7ehO}!=M3L=>g5d3q{r~k*ByBeQeoU)TU@{E1vsEe}Cb8&yJBA2BOL?p`9 z2r6!z6xuV6;3G7pM#ngWW&)-*wU%}i090yvg%K;p2P%zzb=Ydf!)5SIUFtN|eKi4K znie%HewAgigr&VZ!Tw067uj4&QWalXtQtit`M|CCr94|ZCZ8ha6^vaL!%Y~$PR>NM zyAE9A*4s`~7{P2eZVUOf)_COYW%%_c)W(c;#LpYBhKqLq7My7iYA@R?LlwywtRZ;E z^uyb@qh9Q^@aNng}$%e`|Wm$s9q?$$)@7|WbMUm($dk`R$LO;!dk4~ zZW?#?PuDg3-ar~8Q$VJrIn-D2IV3YT8Cp8Q+-PKSafs^4C^D91t_FO4QaQh}n4$^F z?-Ga| zgQdBK&i&S2H?(W2ia`IYu$PnBNE}>vxWBB0=IG2V&odLh)c8*<>1LUY;2Tk zTPY_p@%UtbJp=y{M|RuKeTW$C&g;W0=3(6Gk2YxV?jTVaU)Z_Y??ItUL#L+yLk)^^ 
z3$X|4kKzT@E*6i1esp|v95qXrXs2eyx%S=-O9a{OLVoz57{EK?w!id`>MZVGAI;kY z)g#RWvLirP=dwGI|B2x&#zM`#c<`Ci$B^#{8%mwdqO64dx>K_hr1ud(-j+Db^1fW^ zl;B$wf`d(jS~uIRfI(wkz+4g=CDg&v=vK6Zg(F zS>2g?2RNS*EhM3?P1 z@c7$p-N2UW)e+S-sh{cOQ727%6Cvz__LYj5c5+F$H%D}`+|4MBa@>Z2XN%ggjO37R z2;yKT)*)0X(s|BfA$5-HN3husHo&a$cWV~eRv5jR99Zm|!1=PEsQFqtlfHF^@Db9< zqfc39o~;c8hBMn!Vg#mNcmX`4k1x6^qov*6z?PycQF7!qrk&ySn4Qbabo$=8dsEQShxcu*+r_};^6QfHk0Xvu3)cei-0>M=CLT#8M1I2-;vTWsHfT9zA3I&)!`UF1q?CQgu}%&RrX;Zyl= zPSbcuYI|mKnj@Km>)n2hD3r>WOIRW;m)wvUdTk!LsTn$I@@g_A5ys@+8~B^dfd?cG zz|~t6UBI10VuXvKfcCw+^B1hsVU~HHUe#J7Qjt{_kvLQP#ln+=v7M&Ur;AgTf=vlj z7}UZC96WSR9Z!Zzi>9iIvl1Dcr7*bO70O<*n$O*AoqE;3GPPgsQ)mI1S$#@NrQ9O;&HH$vI|r1# zde@~(bEK`os9VW+dKh1nsod`;7)iEN*RJw13!F1HFpnu$IvAera$Xa^fvaPXu3k0f zp(I_FDu{#pv~RWctOr?iRETEy_~kKtiGN6V!TL?%@)tU@4w5sDX%eWAG*YqdF_>*d-)`O*C;D#O}gLz-5W>1U`g0?DKAsfq_yQnyUla;3c zq3WcN^H8gLW**c-0wV_{xRi#zsJ@k0=Y)fpDBvm*hq!wq>9C~+Jq%+3(_HEY(4y7$ zoQ>fn)s=Q&{A-1N%Iw+_mIr$F)>;Yoa%{A*Lk<9MpZmm~hzHxyMZLjboHpT==pZt; z0e8o|Hf~0BCxBl)%U~?$mD-@FdpA5+@cR6+EB2n9Kr(JO*REs7hV{=%(u%DoBOJqL zpb~e~BLFfP+E62{C@|YoAyEoSVtC&<9cXx&~T;6cq zC+&_mg0Wj0;ro3ls8BSJcCVqXYdD|>0H$QhtXm3OQ{wjW{w;?(l9}#rPGQ-?sFV-0 z$k!v>*%V^isFF3F)W`OFJld?7w5eyZ#m5itmwlCbsPa;MvP!n)ywLoqMK$b$`N5~8 zQ1=pw|Q1D;?OO)xL>YX>-5TxdIhY z$(Kgm5dPPj?fNi|5+yC(j zAH+4=8+3%wmBj4p;LRNW@7>LvG3XSRvp-3`M3{9Cd~&#h9pGZH&<3$pot?@j0s4kQ z0ztSjFD&ap`BC(z4(#nIBxj{NJV1M#P-a|wGqBYNAOvYqxxm%#olRmcOJG7 zNQ*YX7D3HK^m$_#3AJ)+j~WxdaF=1FXi+sV6C9QZNQ&}{rGEYd>i`1K5a)+~1xxb} z)C%OmeICE0>m^3iEDswKY)d@5oGHZK0%UuBYqS6&emF!6Fhm?N%d~G$4FTFRR6Vy{ z3=DShG1>>mL$<&3__18*8=;FmD1nh^qL(*G@UE?nYVdv2k#hh;H|VT}$RsM15$kF+ zrTfdtZ$<$vtxK7%wuymuUg_t>eKvSzu&Ey%qB3%r$_5I#Gs|e zL4qNOe}&EZ;*VioKp>1pGHN8x5aFPy3lCqAiy8tAj5(mXB0~#Um}MxtOEG zDh6m?5KGgVaE5&L#f}z+{F)rc`3hJk3L1&t0PjNDWqp_W0GMnH>+;LRQEK&8N+reA zxpDQYu*6lNCFzu^!fk_g5*H1M&-vsedH29m=SYnookP*eU=yDxPJO(Ag010@9{bpw ze$!bKwAIoeuV+$f7hWUio&SCgWi<@F6V06X%q_dV>&)U`0258yw5PidMsmZ5!{pHD 
zgaC|AeR=Y_a}1dX@X`4QM={UjLU-$JBA~3!wppZ*@K&5cl5tADdXKCzYr?9<^Pio1BCHxHts$^m;v_B&i5C_oP*L zzszFS@sU*u*5&r)j3i4NXHg zw42i~I^5r_!(D|yTibt}+G$$5hR%%RcbZ7sLufbvy9dmcxc?FKG)Iry#&3Vyyys<) zs)v5y2u$G-0_mM(_R21y!xt)FubP^J%O?*;`viunM{GYP!c9ml_1o0^kULL`dM=n} z9m_YRID=2IaJXj3L!jy5w{RG>m+xHQ)fQ;wNz01SEV9m&_LjCe%*DMi_~|eNuk{JV z<+<8}z4g)(Y^E%XEXVeo>^N2#K8>h{-SS;4 z;+s1dvb7omLnB97!B|(tKZ&p<)1i(!!W2E~lRjL9Nydo`GH|zHVDI99^bO5hDz8F;Za3&nXRg6{ zZ)*4chR`qgmU;7ub{j_<9^GP?E@Lf#2|Xz}9cm*VuvSssmdmQvI)&;0- z!T}__6&}r$iAC~;bY0s|%1w&p-yJPyjk+IWb;Z4jv|me;1n&>8&ZVPB9oJf7Y|E=r zsNm)>k#vNLUdKC5`N-01uY8Kvj+QD0V0+G3D0zbSm_NbtfPf z?v4czS<88{$Prwh4{;0HqnW+iQ4jD|EkX@-H9p!&5;!WJ!0PcM;llC7;it6QKq$aY z@YCozln(vwe_}KIad-oMvj_Nr6+d@K{=+DK4)Bfbc2^hFyMx>1e5X%7g-gM#kB4$+ zIjF^u`G>7*QL;Q{YJAt;j_yl9=mE_TV!CKqpKqu%dr z?(n<;O&Jc=QBr7_&jyehPG|U1lH6jyl{Y^`JN>DgL8fT1wBK+*+lBqXXL(FI-2&ca z(@$kKj7hlI;r?YhN))QXVg^Q8UMh;Ym`qcLIU$!R(@e)@)$EUk?YZ*F(KCirm$k9M zIqz})KN;ysG-)-!-nf z$R!10Xm}ECwMZU`B7#UkJz9oZ;r(Yguwu9+*Zrk58BMoq<}|O#Y2}A4V4lGtF$Sw% zL+z7GQ7@Z%)AYN4;dhW(2$3OKhv~0Hz_U$o*-<-NIGJw+!n`<{%WWB-W6@;gG^H7n zq=A#3!AMFPbUXSi=C>x9%xNGc*63bleY+fJ*vLA1be_pR39*P5rB}* zADHn9ykOh}1;4i+L4j2sAP}EGU*T;28;p{Zf)ASy*s%Gf`Pzd1U@9I8{wEB=9U#yS z!_9A0z8=_;G|K@!*fWzrpF0R3`I$?bQG6G!>ZjeZ^VOV>Q7$SA6j`9Yq-t})Z0Z8R z^Rzy8$tXsVw4ie?!tm8U;7nQ^mm?p7S+f|>>VJC#PsT|vIh)|2`bY>Ut z84llKiGk%ADx4WgoxT4b0IWb$zrX=LXAhX8iHmutR6)2V`>vSGb^+N@JmxYjv$ExE ztb04L1*jMFHk>01su$rfl%)!_47L|+!cUag`_JEZm{yzXnIQdo%<78G%Z#cx_dK0b zF0^s^u|RY@7)7B5Sg7=YS+vsn0v$}gIo!$WxRuVgByMY*OK9Z=TAvDa+~s<=Lw5Yf zQkU%AKCwy9*gV&YhiM(wk`cNdw`Ej3@Nj3rD&4^e7hf(KY#uMWfsZ8n)1eLMOffWQU~5 z!eJeO3`Hi9DQT_10eqjst+An>+-?EX5~oWCee%#KBMr?WVAQ2dHAxhsjRiEe7yQ_! 
z3n+#XvyBX=pr%%jS3oDMqXSh-ECE~%6+7PErE{Q{X2H@25KqPO-9{|kxRYu$?jj5d zYAox%BLC09fBmmt$A^DAIQY*Ok zFQ=@Ii;Mwzc~P2}A6&_z-sX||jU%Um&e_;?KMviGgYE68Sx0IS%br^Y=K7q>b>(Ho z3VS&i=r;E9rzX@M4gGLUWNy_47o}U7Xdx;)jZPxR*r2vk&?eY}mBZX*%Qy%eEN}N* zGgn}e+q*kNeA`KkodpX|qc4)#xBRG>)o~l|-fMtXlKQ~$=s?p8ngPa`a#1Cwu3cij z{eH2Jsw^1&KsnxYQ~KF4W3GSIzvjO#eocS9|8;rzGdak4mT}DzDN=jj<9~B5O2af+ z$Wm#N(xoQ%jNNLoWK`+Cr@te`HUFheAKJfIlYc4Kw*=!(K^K}X$uyJ6t)7tUvM8X= z6)A;1&d3iV@{b=zt(u{qaQutjv7OT zuzasKi$RC*UeK4Qm%vq#RhpQRX0_g|a%ZgmXXVL=OiL4Dej!Xi-NlMWDN<(le2AE; z%@b=-u?LK$vC^T`41?N!wp%nY8IJqsetZ)bH0)R|{q1Ub%UX%_mD;0Gt#m8uN)gK@ zk)bJBTMfMmeWrTVRfXH1K&d$&eb!eU8%nParPmujL+SPPDGsIAhtlieGn8KcO_W}@ zXMM*b{rOCPKy?)h+RJna zrKdfK;^O4?@+FxjLi#I!eJ5UjEJL|O0ZUnuRKipD6d7yE{4fWFWl&fKg{AW(2Zd!& zSO$gVcdxLtIdYE1V|Z-x%@Ijto>P&IAr}c%^AeA?p2sfeE*;L=@MdVR`TdPOwI4z= zseV*6VV6vCxdjurnvwu4d%@W0aSt#?SY*Ytk&2~ce${nu8hI zw$*5Bihuyv9&)_Q!I1|ivLJIiYErKR#}svSLbxM)Ue_uGA;V=*d6v!pcqj=7#1cI}q2W1EdR zE;cy8y&c}u7ho(1=!e`ix{xB|{()>XT+I883y{NYA`J1xE4q64QuMKqj@%^;9UJ!U zVL1shbvg7x)C7FmAk6*CtQ@bkIASJ=L6&YpM}NV8jO~d2+9UDrR{q<3rbC;p_U-WFfOulR~EBjNQRK1b-*g7{#Yx)3=h*if<{Run5hJy~JuK9@7;dh)0TD z%5>$GeUF@({!z(tLv(9xZhOb*+@l%MW#L_U8KQ$+wz_c9AeITjbTmSd3YkvGwagj0 zlUZrS%d+tPZkX0T$|=im54aT2pfTLrF6=wSk4s9>Mzn|vLIByauRyv^ct|_*zVzimU889zdHb400l=G!H?kp%Q``aC7PqI?D2neH=7z5#!uo88J zk_pp#Lf&hpZ41ym0v6V7jtRM-OF}d4+;>o!A7b=9)m~@!16DWWwu>YH^*tg>c7`)gWYXC_~;Sa zF9w@hR;bSl*E?s10$7FD{OMmLr9a{3)a(rx;qu+FWU&W)qBt-RIeQ@yxnZCmpU<^4Q`ba7|)DpKOZWvK7kWu$7)6w<|`*LfHpe3;FS$IA@b3W66efW!PEl=t>+FwZqTQMM1mX?9(( zq~ihgoZ0EjjQA`Z+g?aym(59)an{n$Pu-Tk^3T z+acj13zg4Y0H^kM1$apkMgb%PP7PMK^fu((kgTT<44{;q=riU4yy#i`apkmgCX>C9IQmoFuXorbE!@ z^;&}_o~u9Qlg>q}yy`CL=ly}sFR--Nqfz$z+J}D|>A+`JpSiB}z5=rsJah2+WWkbK zBJV||91S?M0f#o=&;}gZfI}N_Xaf$d!;kfYyyN*@Kc5>z#A=8y{V0pUgGFLB?%rz+m;(`VM`f%_M2Y=^D4hR2m@DB(7@B83i zG!zA^!>X?Te!QN@(!(dfwu1*h=1z(-1~%lAa>Y-7o{(#v^NcDKhlb9p< zR%2$5yfl!NdSwI?;?nE_jc@22$i?RZVBmZPeIn-0jxH!5U<=vbhd%cA2^ZQhnobCi 
zALo|xIn#zDRDg05j7U_If~sH{!-wBrz3G<~LT{+YS6_j%|8seEb$)U7_U8Ca4c?0h zsGtU|c0f;`qn1cv|~o)4*Gg&m*RdRCL~IWW=X}sam`2(F=L+ zd#Iq;kI7k)VxIX6SqgJqPA{ohbisPYn^%$CHjDf%99Y*Qa2g8L&Wdi72Z-qr}#$|yrbIQBG zvi8s1vmfBS*O*-sH-k@nvtstWHq@|LnO!r33S4y6@|&MAikz& zVkO5sN7$D>`hy3gHdCnmMv=3WmwDG*(LcfEVOcO#VUdxs3!)dY%u+I?$t^NR$`xEN zmUu2l`BV4y9HKX8ROnz`Us!cEW&8U9Qt$enRB7|YNzZS&W0tkmg}i|itBNPGGB*n=>NRMGkFXaBw>>hI5C;-zrO($+Yynej@^)OcR5sfW;Jm&j4{W{?gO`vC9MBUTJ4ypzRhuVB?szsg%hO2PkCvI?$P!CT0Am0uSb6>Kz+ zgvy^qBSJH?kmY7a5zTxIf}P%q8l4hw=DWc-{7rs6}J7FG@^PK`qs(bCAA z8lKQBTTX~aA0c>~XG=JyMd~zf9Gw(|rz|g|frtwXNRB;upj2?Qm^2(EebS? zV_`KoZJ01)ugF(l;WvrZWOq z3q=EQ@O5K&-MFO&%;%{3VGlgC{Pdx66vn(Fvc1(0?X9oKH{@7Uby=LHNzg(sr11*o zfp%X^93j1yxAORy{J;!7TohnjeRN2MBh;L63b{_)UeNC<=ICf)y!8hCZ zD_9w%AtB~uw3f@j$#kFXz}q%vlxLQZ-gv8+3tFL|II=sa$Z?J%MFZ}L+%r;S6oQC6 zA#%IL;}N;k5AHQ_7JyYs+fxyS8iPB_8F)9+Y?ov0R;PMB5NQRp{Rzg}ttf`gIM8R@SREhK8=>4A*VSW*5fmfunD>Dubh_ZL4fYE|Y{#GO3x=N-B=<%V>=e}< ze|pO7ZfNg~)A+)W-%lb$J0g4|83uIt%BQnjz)$bB>S_%1pKiB4{A@#(*Pm-anXeDG zlhW8{Zmmf4`14pDv)AejX!Rb)=gu`AyrQ3ERO{{e5Z0Dz|7{#NAN5l?+kEbvjPiV! z)`P^prp=YmkqNQS3z_zrd(|lTqwZsjq%)rIjkE~aY+mz>>7_O-hiY~?Le0TTDWoA> zBw3jvsuY@ny1tVt^f<gglh(fCaJvkPs}4ef=@6gTqAwcGN5!FuUsCdnYyKj^Dq6N3RW4 zhNb)4DbZ;|*5=pf{jLUsf+^D^W%yq@m8Gf5dlYv`8&~(xfHSJirIJ%9LDAdT@rG)H zl|Jsl={iOFR_os7PUS9Ndc=foS&Fz?DVRIPQF#YL zVYdu9QRlrbc|Ec{NLXx*+yYFvT(VHO8R_>=)5|F8GWmRG4fD*lT-MrH;0?10fAh05e6! 
z(uWaO?^)?T#3ya--`*VWse%ek1vPxiGj5iv?^v(5vt5OoQq9iujc55P`V(kp+qi}2 zvw-}f!h2sk@Y$nEzsyyUU>wp-rm?(nmr?(Cb$Q~Ny}8C_RRuo>pS!v|@mvC`7Y0GP z`TmEC=C^u_CRogwrC4Pd<3`>}!@_MNTcYU`E(XW1dB#TMgeimnvTOYhwzPksl@%?a z0S`GLzGSm)zrQ~o+i8z+aAT9{@i+VXONOJ-zFZa&$rNPjNQjcx_UO((&~a6)7KQ=DID?<5W|{H z+gUVCZ>yBNW9p8x`=fiQZn>C`t>7BF@SGzEt~kON3v%=B^xZ3RoTg5Al$y=T%x#BB z6m>cReBp?cJpEf&U_>V>VJ?TJ8jV{ib?rdz_Y3BvUQ}nDNJY5Ou(sZb=)fZ#hrT(x zL-pwl%xrPabq0qdifxJOC(U&nznV?xk?c6?s?0x@=&w>Sxch;7?v^cwP5U)(TF0fo zy&c=(p@drHqvP=piE5lO;tzekmKD6Zz4YDZ9hN`YSc!UVw9U$j3t)t?&v-VdE88C# z)Ro_|4@~jdmi=BE zy~`o!^E%p*gC)?gsJ?95(6+DgEgGL5Pfo-KenM*AvgLJ)TyK%-O{3qj64)x!KczhH zD5V~idgSYt;TM)(ZbZfK%|4jKDF5IiV;?WIp8rBRD3W3B1g?t2}brKyCw{F1A*r zye326d6@OBkoOK@>-&*2?~@2V{NTyhn)Q1l6*F+HzLCi$Uawz?#SoUM4J5PUSUm9R^~rbN zefRGUn`A{I{E~XNE`~wff#=&}0PGVZtgcTr8NWK?aIn;N>S-Rjdlg)8I_ZE#bmouvRNT%Ino=p z2Tj0?t7al@*BDD)%In_n_hiyZoc37t=j8i zSL=LK|8sYjxw~yOSL&GwLamip{aH5`Bc93;@WKz?He-qT zD=D?N=)?0+?7^Gq|Kk7k*Cz6|$zy!Ord?exAp(#3U`an_mjd%DxC`>Z{@WmVVSWJxdh2teFF;8TY2`tlG1Dp@@|!FTh60;(%vc+ z7|x@X3anzKI2qVhI^42lm2S!<8iS-yqiS)zgv9Jo|J!Bps9Zi(QMamhPd<^tpYWQ; z5~nTeSl!0eBg+Ib_^cvS8J?6@qj#l0HYyVBF2cYT*DR5usg7y`A#YY&tp+JtHBP2*lK4RGhyf2= z2{p$%a_0AE*etb6VU<$9(wGk;i4{10LVgH!DnV`Z3A~vZt<)HFq^&l~Oe%KA6gfyG zv|)EVF^3cKe={XvnFX7p@4>OJH{KbahFy~vEy&S%p?MKwtO z{QcI&+-CTIp>_*P9Z>=D@qim5=1y z4zZ_A(YQtyGL!S=b)gtdPo&UB!OnX00ge3;4IQFa4|{y{fxTgfk*2ng_gGN^-Wg8Z zLZ+I~T)LBZ&hMBABYe!+3mGM^Te6?7?UP~shkT}A2ifhhN&amirY)NQcM>TQ&5F9N@Y&@MsQdc6jPt%s-HKlYKK%Q zUw$gp=L8HtL*mq4(ATcXQosB(sXeCMH8tvx6t(%&IUQ>Mv>_eRgzgIX+%e&4$B2e5 zqwkpBw0~N)j&vT9nRYs~I;?o>BRPGzzGe5Fw3Yn}e0aq=l(M;EVA{t!11J}+fMzM! 
z9kWwSxyI_M;2<;m`*q)LVbv7ylBIm+Q((rWsw>^4uD?0+wz-$9ii^Ab)zvy62v!+l z4>T__HbP7IcjI5AU?j;(ZI~JnKUH)w*vY-bC55+w?G6q;nzOpz=haNA$;+dcM=xHH zUy1GdrTg!G2yd#3@`04odw@tc!7nc3~CmqWri9bb`5Tp zMCJ^}@bZ8AXTX(Uh!pP0mj^JULsu|7r?;pn9z!ZV`|^Wt$1e_v6!pjNxU?#YAK}5v z!^zr^zik+DZL}~7yLh%!&lEJOpKiBVFci4^lBET#!wY{~^{)R-=J4pM_}#aUzAAbK z`bS6W(oOZe*PEQ{L&(&zjGmCUE}thgP#nlAxu9J+2ID#?fLK{Fp;>HB^OX^B?W?L< zz?R`ELw>5BB#b5tG)ZDde}#HyWN%s~x6F9Cg@ED$(!^v`*u!}3_FyD-hAm+M8`kf! zmX`Yk6YqtlhU*!p)6D@UbrJQ`YO~0`>F$9kqnWSzYZ;TzJd9ZE?;C<`oe^U7nrhkn zRKAra7%dN+0p*YwmSwoop2rn1sJ(jnr@0|x;@cjJG(28$I#M^i0PVqwE8vp&hDj3oejtl z6ALREXCdRc-*X5&sJP-@0*8W_=7xNeOU1s4p5G`$H_66U5a4DFcI;J$#@`}w+4%d~ z?%tivYNNBO(a}}UnzjN@*zndydiiALWxI9kL-gos`mw44a!{`8&%4r@vNTpYUmI%J ztjw;Nsg*LWHK|Yz-Uv&J0xKmO>6o(Cz?~vRLm=MBncY>W*z78162-JVrsd41v4=v# zzMS?Wt$We1BCV4Wz+r*g%<6a_e6>$kwK|OUF;+b2`jVPOrvnG#8QqIr{#6Hfp~mUM zW~~n3l;Ks|^>Ol!E(uk#6sdC&1|P%4%8TFH1IAfFolzj%wWiEa%a|`GFJC;GJlilJ zD{f$}(MaKKt&Dh3i08LQkHVgQ??l#3k-Tu$T;)^X13*xfY=mFkD{ffiH8_~jOhdij zQiP3vSogQVBio0+4V7)0D+XzuoyxOkW*ITwqJcYeIa)3|nYoeqyhmT$kHywM~ z@8&muC@2WeVuy44fm!`u6OGuz4D|1792sZNp09}hkpUwwRgd~tRN zJC*rSR#=00B(;2%Erj*suoQy5?Z*Y-QKeOevL#^;R-55>kKGq7%7&N9jlXUv3Hw+L z0*fDXI>kF3;J%h!pR>E$u^p=9efmSWZdA7`kcNcN!dwk)kIjzJK$P5;QBW-DFUdz?>T>Hz^jS2VL)f#&c5TFWh}u4_B@S_%4b;?2k^AYR4TC`0o0c#Oev{? z!X1$T4v|FWRtM)1S`64~TS|gXCU$_tY|{_dR~vwFz8R^Xc$?PxeCC56M$S@-k!G`9_XXc6X#x`f>!X(GHDjYGb0B?7iFz@cnE#ipN+zH3i1jt z+-6s<*J@&1)Ng)P0e}v+F5Vs}kb9q0f|1S%EU@?U)9$px`U~Xz)XT7_#z9(>{v3~l zgX5h3A_Y17;o80K@G}u(>qc+$K#l{g9_v1$zm$sE1G3YJd+eWHJ!S3p@VoSQU-3 zs-$$+P$s+5O`b}XTQ0W35$ddd_E50IQ~>Vl1+~J20uVGp zP8qsh(pTOwA6hgwxPW-W^cBh#g5ig0#3=`80TJORdPjLiUF`zR>}@rUsnjs!CvU>u z$MKrAg&f#Ej=)-TIH^wJ1yc;~cX(Xaj%m$|anWD#Dc#Nn+mFchW5ElZtq;7vI)Bo! 
zu`O%d`mq7gFipn>{BX_G9Z%S&Iyrt@{NU4^_Bv(&l;MsUAIVokI^ya$vjaJHTO3X% zSF_@&yAn2cuR((ZBU?DTtH1QLlvBJ-1(N17CT%(j$%OM(MS z!HrbuFklGB2Rf4v?p=g{WwIz-Z5=trf%wMPso~C%t%rAi1V!Ab+$TuDlk1Dq^A8up zS^m{j-Vcl%RGJOA*|uvWAPW<*ZYg-Ya*MTnm1v)QXeXx4qYi21{RmQ;W1?tXWd|$| zRZ$=vjbiLLKJ0CY@PDY?GaACf3eLx2mM$`Qdn(n{QGOgaR1pyfCSSrgA zsyVyNWH7XoN;5TdlKc_L(kA1fL)UC;l9TO3xn^TT6)%<6rj?l@rV0d!8~R!*IhgXJ zkR=txpOQ5D7ZufxS0}OBf`+y%zoT4LewitnOYm4Ln2`qC(0s0KYOQ^P0|I(rt2BQ^O-T zL-wXB`E@7`d(b6KSJ-h^EG2iQe(E&aGB^@{xhRs3drJKSpcGr47ZFn%h^n?GDg@@+oCm z6&O6vJpyQt?Z&AW-JBNn;-AT!kr6jOg^S%#GZ&H%yN=yqLfzFyDyFKA$a#$+7=QS~ zADZBy*WZ5g&5)dP1we1kbPSIwJ!A-?HXwt6-_Q`ZZ6&RP{muAfYFt2rC2PSmgP3w$ zfSA6><6d0066ZmOF&>kG-!U@Q-z!;_bu^X(#JFG-Y^6p|UsBj)w6qEgAf<77ZkN3{F_Lj*L%=r=I<@n&SQ-85X( zbCd*RH3I;oanB)r$+j=os3*%W8fUH&D{n)_cK}jo=CB6TM=3|hDS+X*NFn#Er&1t9 z11F%FsT^qloMFGsw07w+W>no-E!3^2A>bQz2xkpR$9)Cn7}sk*ax@$sxvWu&XhW6b zYJ2~X@##?Wm-?t*S8k0q5R24}_{$feGSV}`L8GTSKdprH03%B@Ds~*IV0aj_H!X%7+UiFV_QTm-}>(Ao4b? z{uidvC(UL`8K9jiF6s`2fKrZ_?FzHzX{z$gC3$%kpuXN6cqt51W16v7u?p?+03KsI zr6MmpVwkYcP|%`)Hgm-x{nTkuPN~m*x-kYINwJ0&(>=P_0jKqdh<&5(xxijK3LWeJ zLpzBehe32KWTl+3(C5d8oIIJqhN&isO{@k4$!|p*JGeI{^wd=IjSiHtuJ% zDD-mll^0y7M|em-&LFpw>wC|a5}|J#m?krc5O6`H@DD-KNdH0<-2$;^z#!YEvo|TH_53F+8q6w9`hEY}G?FC<%RxOmJ>Je;-OuLmma7T$O z;`BQ7rzt>dc~EaQu+QMlBTaIdRiSJE(7tvvw5JDh2W&b1z>AG)L&1I`YDFu@p-K1h4jJ9o;{o0C@fbTKmu|9of13^BP}6T3YchCy0P%`}nY z;|41F#r(~ScKZbBni$Vb32994v&t!jngqe zWyXU)5+*X4dhcT?z!QFsB^a~ z)b+bukwMFBzD^r?i8qH?Ssl9vhBG#kYH=JMvOhN7a!+6y23LBvSUljK62{bqh2KQ3 z+CB#-<0yMn2*K6I!3F?#nV6F~Rl0pFr296K^EuZk*?vh%3B3ABFNU}JPIBiro_nLz z558zI@}$oV5O{HEw|?^?>9bF=E?<4^T%oVNJb`xM@mkc@sT{n1@c>NRMTPJkI-CZP z>M?pee^6^fU`ssbiRjS__jD70W7w>eimC<4-O+WiCYB`%X4yMv8+x5{wGV)F#&TZG z(v1K=LDmeHal^Yzu#u2ih-P=F@&!WR{vHSMzgVjA0jeT6Gi1&zmHfllc4S2m&hqQr zoplOKsmKS|{2rXJBsqY9|C2#w&8^10FHlBxDZPd?w21197BZtnnBB&P4D%WmfVY*} z*|lRuH3jSog+><$&mdWpeHT@eFr&9o3g$}NFF^^S=$ZOM_daKpi3*}NPZ0K(8&B`( zv<47cuf^*MbW8UYr*+9O82*DMqeBj%6}}a`Xfm!(A`=E)3Fib|;llMySZSsPlZxlv 
z*zZa1w>$2#t>Sr{B?;ztXHALxj*k>0XHzN!D^imvb;zN#huD2uxc!5r2zY9#hKi~~ zlA+p$@$QTmswT|X23+pxr<{K0u>ueFj~zg={`Aww4sp2a=H*_hBERQ3f*&+_X`6iY zbb@B@@A>BPY%N1{hAtQ)fiXI8JhxlZOKAEPx!{=J)c%DAm%5IhJ=z6Ag9x*H>`iZw z{d810vyC)UqWP>U3>8dPx>#(T=Eo#s`@T@H&&Z%@`$KrVxlcgaLY{fTuWS^69MN0~ zmY;-812bC!GKghlMD5;C7S&|GqE!dO0o7;>t5^rktsQmy3rcWvyB?d9vyzD%5@JRi zf!*DkGS0sI<(>JFOu6`M6tx0k2HE+Y1ssy~Z(r;W1IBnp+tmXGfbR#mLP_J8)&r(C z1Ebtm;^>5Z0aAJm>|?1{eR$pzKubL|4rW})>@G3x`W-&IbV_qKmjkc2oXQ-(M}i#O z(5@lN!b(~`#k5Q#W|x(!gxE~Xq`jFMsyiksTF!;1nL$()q(+%BLm|D`;NRTlEgDbS zZQGBkPTSqA!IE2ALY}jIw=Vb9AAaZ%$!Q;68lRgSFoi&7%UXNI1onPJ?)m+?-O8Vn zj9rCb#Mh`1`YqA*TIW4(RuZR^f@qB-sOH`q(pn$7(s0FF=%i z({v*L?7S^=bInSbxEbTaOI90h>rw!pT?a^WQplB8rOIHN~-#B)Jsa)L0fwM_EYWuow1iXET~`EJ`eV zR?sP1Y7N#FI*aRSQv2-U!4aFDD;e#qwJRHC@k+g@HId}RO8KMVk>9Y36$oEw)C2eo zl)#VYb4bW1UsmYY?#AoUfY3mUHCy5ekKL?B@kd?2ELLZQf$c9s{3nwsd|mhQhwJzI zcL2FJ`SWLTHsgj&sh)<=m)dC>R?mL@YL_B4;dYiJ7>byfi}dJw|M-$ZA8w~)ByB@+ zY)Y{f`g)k`UrgkfPpLy%ZU4&0D4T|F2RC;ZZq5wlg^uV_S9INPz+WMDbU+7|lKbne zU9yvUpsO8pJ+hz=0zUN#4er2-$;ycF#N)Hufn2Pk>LKxXfzbtEL6T@kM4?ljo zC4ymd>vPz3QF!eK0TezM4B#yN^6ysH%8OUzC+5Cm6-K+lGDGgC5>9fJc73fW>kCYZ z+&^_7c!Qcr$ z4*OQ7dC1YT1Wr08g5A5ed3fee4i;wTo)-W1_D_)k`QL(-A%v)hG&hh_cK>58A^acd zYt)%ma18%Z(E`W*@ep>1Chd~DemP3)MX>H7fJ+}4K!+tGpN(vRSc4qw5`ffGS!IPT&@mq|u{oU8!zTP@bkBgS&a&uV>cNZC4xPr)~{0Y_`N#hfO9lU00z9G&0fd-W{h2-!yUpqP+ z@xiUV2d7N?9;>>5y=O9ng&T~``Zs4b@zJ+%(``NvO6SU9` zTyy4MS&rrCL>6CeuarJMrIGgaOVNxxnC$y$N6r5*T7Ua84VHqY;aZ&CT~6>;oj z-AmOaxbLd*%93x@{kBtaw@2jc8*pMpU0-MA4mjBdVE4yo^Iuvq>&~^XesnLmgQlf> zy@+XG>+LyT$x|t16>91vty#=QI+hCJ$~cCa)k)D@pxLoY)I6i5tH=|lEZt*y?ve%f z!{JlkKI=rck7j0fOp!cMKE166cOhe1GVEveZf-iERiv$pz^p4v(L#zzJQY!vYv=J? 
zlx$mz7drZJBDF z7DRfkO}yGGrNz&-=E$tC4jI1V7q)xm>np`&jMp#FaU2sYnLASIt!<#%LsVW{9!J|r zw)u1tUnSH;*)ZVs8fetsQ`JFN>uIbMyJ)MXu~RReLWPa`+C}Y2uez~e%uk{I#(pFq zJJjUZK#a$)&QV`Gsgb06#f}X_f7sd`dk?d#fU^)xN7t{oKFBhu zJ1+C?HA6O0{kc~Usjppk#2qS%Y$E1^*A}U-oj1Ww^+xKeL*f1XE@{83a zsjnxQF@4>2fLbN>)vanteI2@fNqs%48kJn#WVwHxw5aYjpr>Nj>CiZOV-Iq!cPU4o z=#lK#feSMN~5>%a~rr=Zl{q^IRf za_PHIvrJaP+{!zaU?ERZK3Cd(U`>=(Ik!6D4)b>EUiFHfmA@iqd1%wG1Y^fBB&S5N zjF+5YAvcQmabiP8R&F#a#xA#|i1TyTI-L`cYd?cqll18seQI*W{_giQ*PGk8h4}Oi zLusT1hmV!qgS3Qtu7DYCSSH2x@;n=+<6^lDKQGRJ#&VKZDiqts@7vq(!P}@YcpW!8 zqZZ%*lyMW-D?X0@&_ouwk39b4Yf>_mIgSJMi(_uN!tUEvylj5?>u<_B!Rh(6?mE)cgv%xt{$E^gH#GG7Qfb2&7k(r-)e>tkm}z-SB~X@#5ez9X)5hdB2iG!}Z>wmvo5>lL48 z%qs)-Bl~uAzBNE^rK=7Q#UoAx85hi^kdA^Dc1bQmT+PzNjTVQ<*`1P_8d-Vc16xC~ zTK5e#N(+)Yy8iCGx&E(dE;aeCkl7tMXQ1FcWYTIi$`6x*u}>9vkW_(cls~ydfipVk zlB?C8n1G4lMW2CZN0$6*D(?qI4l2zCe5*S$X)Jea0lwDc(fkEpXejeWfXsGvA*cR{*1~xZQanKQ&qmG97#r72;paWz4bgBpMs+i1IgxXw z6wjF^K{muEEnYkZmHKRgTdzZA6YQk$M*_vZw<=I*KI7tvh?InUfmR3D_^aPUc2%== zMhkZNDM{77!DpM=w!ayrPiaXb@|VE0M96%zrSg}P94F#E%p4kFBw^N?oT}5_Ksq8%w-D+$A%fIa_uw3`9x*WM~Bk6kM zhG!cfRBM^>+MsQ)rmGjfIf&o=y9_(#&ijI&5MP-|$NK;meq?)h6Q{aUqRQJwYD%vM zE!k^5^wUj%*EM_vnUyIBDJq>S?!hg)PP^Fb5%fF6<|klGJL{xdq15Y6luHgEyyr~3 zuuw}`Rt5U(_s{t0Esgk{y$X-+kYcL^(z$20tw@`B{p~m3Joo%tk5@0c)xy@J=zA1> z*92+@L(Hcm@{-W;dt>4|z--d+?>*|hJLIN5n`l&p=Cx)B@ik9xSbkEJ4V6lZFxnfUbT(w>a~RZj~&By09{Z-N3{ zpw?}vF>F?v?GxGCB)49Nv`Ow@nE5fh66)PlGb#plRe*J};doe*d_&@qc}O68>;*t< zv-lN_3a}i`_5|(iFizaImR3t9S8!sk^I^<>VMKdh;~WNciw)Fd%NsjnQ~O2r7fPYy zx8q&H(%SaLg;R_enQ?nab2hKIJLv1NS~OFrd@AQsxM3!qR(miK2z|8w_6Pw?l;_s< zh)wC7%Sz$fMv@oqClJ{SSp|~@)`tnB(`)GTvHlU%_gwn3DORdq9m`Us)jheSb*;>G zY2|x|DiJ@%Z#30``3?N^)N4fiIuwFn^e=d}=ul0eHul>W$#+zhkL479VOv2S7%9;oO#(Z#A50xdzSBlA6!*2d_F)cYQaZ zvh524pzfLcpicnry=<$ z4dEgysvL~YO!(kcz$z;_YsTUpBh!Q*qSimb4k8sCJus+vt=c}}W?GGgnVdx${E2gw zo5jhfkfW0swLP`^yit5q8L9M1&gQH*(R?zXDw}e{GE*sbLQ6i#q-4Ega4y{!_IY9_ zC$??dII(Tpws~UPw(Z=pZQIF-jmh(WXKJQuzVz*~FJQZo0eHLoic&nn=mH{!a@N5(viIl#dg?PjLPr 
zN3&Q!=J&jw-l?Lpu~G>*eB#uQaa{QaiPEdSC>TDo&2Fq_kfI9U!Cuhp&pEqpukYvE zhFwpWbd?^;G80H;l(;o$yFy!})1% z*Y>;CM35KRike`t(u40`OP%6$$2REMoAY*j@lyPnZgxN3kK>I!EByGnYfSx`jFr%I zWogUz}qlH#Uns@88zQ4ZscQ?oE6n@iGHiwt%;nhxF1VD55nV8Hh zrJ;cj6)+Ham*YZ$OMu|;d=+ysRC>pfJcn-!?yfy->N%946}#6c3t_V(EuxBomuTjs z#H08s+<$ge{w+Hri*UxD!mGx@!^@qAE=TYTU@OP;R=(ad%@&Y{#X0#|Cx(is%Cz5> zq%LGvJ%~}Eu3`73QX9nq*f$?=Eug5-A7ms_WU{q%eji&h2Tzvm-DLxNu;uDxD)1I? zBC_Uyn?cBK*Ro}Ctz98(s)D4ti&%)vb0(T0UR|BHahTii<>(E(sk+}m&Nnyu4zQeo zW%gQEar)h!SA;(Hg9`TRiO7I&OidW`$3UzSI0|yfS>P?M5un8bBqkLiOCVnU%oq#b z=tZ+e+!-T5Ob*S$xGJ2K0JT>6wFWCM9@mv8&WY~%9QP%%x>G~KM}wBdbe zXE#h=!4o1e8g2x@h5p)_9aw@9k8!$NqlBCwd%x&_l>0*u72}w>jD|3`cBS}9;`5yk z`;h3PC+oGX6p)~9k<13N+F-Xec8zMS*i-Q7pns~uNNpSTt@#Rq2CcXSvpDtj7XE_q zs%v?h5lh=pHZ@aWl=NCCzs#jT^-XBfKsI(ko+Q~ALQc|M(zX|j_qa_tQ-+wbiz*C| zMe1iZl7n~+wr4963XU6GhVwJuhyS9VH?Nm1IwK_HpAURDx!(W#3wAgHC+1X4>{&Uy zkeCRSldFKZd~$9su4&Z<_Q9;L_+4$l^Z=*6Q4zYcU-t4ZtzQbqYJg**&WVtXQ@p=* zrk&_X6IOII(Q?!*Ag@KeU8c5BLsIQigwsj(1vFtjHMBnDe*w)j=3 zrZC=5i@|1lf5O%SY=}N)Y(Lr48`r^o+3j+m^2cZR&$Y$G5k{)Vr$Tb~;&ngmyY{&6 zBm1Y^9lxKi%eU3?+hbwF5AVQ}>nYa-=ZWWve}b4Z>65(ClS-s{ZwI6yv_GDu8{q5R zmy$~JX9{19=8u$-dIJippAU|dLgN5{O=)3jEri91oNF?L^D`1jMJ4;FLYasT5hFLcAn!XjU>M>Wp>Cz6Tfl&(KoaYY~CEfs#)(cE;kV5N3v1 z7h1l6`HvKM&1;_mfj1T;IX7j5nH31)y6h!!g@>!AP3SeN+ML^$Ls3XF4{PNFhYcpk zC}T@+*0eR8+)AfR-#NtKS@?|EFS}fCR5IgS`TGP*pB=|pEr2I$P=R-ccdN|coPrXT z;gxrW*awa+37jJiQ3Mg8q>D|ICG%<}U9dISS{#e;p~rP@r;*kIQ9NDg9p9G`w~u0F z8;|>E%HlXJr9rlhVD^~A&3fC3KlQ3#jUGTstvB!;wQElcs%3&UHVQ*UT5zn`%^Mld zOQsJdsmOrE4X!PN+;uIG4=jW4Gyz+fUF(GHd|k%VH7B!9+^nWY){@_7jTB>|8K(5s zG3rg9=eMqMZL;mIS+Mf=rezXchZ&rZSMJ{oTa4*S5M zr`epTyKEgKaPgOsapkID9x#Vlgl2_6k?sVMqcrbKehrB22KpRtw_?EW;SNrDJ zf_WnV)7AWyNA7K94Ujh@YtTkBj{~aMrGFyfVK-G7!VXj75a$b&;9(M|aKt<$>?&`6mnCP3iWWNSl9ev2

6Z>AXfdGH(g8nYwf@1EUxYnalk$wRvU*XVk{|$ zl36!{f!B~#_bFcAkhiuhggpj*Dt)X{k3-thynufLSND6Nv+h8q9A4sYmIsQ*$XZaV zFF|*mF4Y&%>575!3M>xnuA>`bytAR4o35+>)>ESb%6vMVuUya6rtiU$b$GBrcG*S?773>F-u4 zDM>H5Rgn9nEJX1FeXu4av`a1RAq{A3;ToZ}91dFdJTxwM*kc;|$;O8pEcn~>VO%5J zMDAg2v=;5Ld59~UQWM3YkC?ig7N+?!Gr*v88oK-TI6zZKzo30RV31Wr8M~|k3I-t+ zM-iA!@;4DttMQ2W%rUdYg^l0!Cblf-FR>1f^N##h&Gx*3yp}0nBOxCmk-L+P4eGmr zM=u;CX6xq!#Epb^R#e+}8NGi{kBa(XwAYy=TuF}1n+uB zJwDSh*SaPx(S0AEfH5kOR*p4sj%UADbN%$$oR0qpRIbCyt4#9q*B+yi0l3Z}aj{ANVLnUp zQRa7J7hTbO9wuIGfh=rVEhs80U>$)*Ej9+C2l8aw_1;sGk)UQS-8`~(t7|t(HPB4o zC5{-!(Z}`ZGhhgj;{>y;e|1;1xI^>apN@yj^gf)koQati(U2dzlD3*i$r>Y)x5MZygCBNg2~p zf+H>Bp{IZl`x}pT?=MjEbthe-=1!^AzF2p^k$&Q&IM=@Hny`OmtK_2o=a1)XeiQ~X z)gW1)$*ub}ih}P}7JO6@%YBh`#UHxMq&?g>f3Drae2CWz*}Mp{PV7%eO^(R%;hE-on({@T^M=HjEh@Oq?{!RgA|Fzd2p zBhDhhcW^s%D2(lmJ==P0Ict>_qm}&&>k6J9K9dqX6;a<^Xg3b zkd#VMtMf${JKldd(gSrEDdix|Cu#KUbenFqku^HjZv0lrVsG~C>2dRVNXVs+Hd58C zN)^!dw?W;4V%+Y8BreFxk+SOeWEdE8sE*LO-zrcj0w^=tw7vyS47eN%WpzHw3c518 zpFXV~7Rbp1EISlOd$A5`W;CiCYO!FZWCJqu*P-NM?S`QC`yd*EII+fd$&B5cQ|mLr z;4a&&UMfM9YDBf9eyd=Rb>`f> zlO{u|h~#h8Jjmn&)OJ7V>rzE#wV)uZ4myrM5l6lVQcU#f7 zS|dPMpqkOzmwhXyhz3?U4TXpym-5FCrL-G;r>zQTU>(HF!`EIeTL+i9w`f{c8@kHs% zUGl=KId+SUu8CVuneBur1inv-VX44h3q5DFin*Moo)I27?D97V!EZE{g=Vvhil)$1 zaaFisiKA0Y%-+h99nZBUkcfn=&Ogh;*0Cdmo8f zW$!9-hNiM5thZ*k$v{bZZ|xUH2k%|7cqJk-^BJ!oB-;mEJ+dWDy%{a@XS&Tad%Ek|>#~ zR2{I68uP{}??Gbu+(uh2>a+G1V|);%u=FSl&L&K6^4hVsJZ02O zNPLIAP)JCtV-@>$6ZS{#&AilS1=(?uTuDyKQxyVQ#4wI^i)8b9QAUelz;L~}YoP~& z^i;pOenplXtmn9y34qylQ#*0xkW`wDT?`Auj$lGf*b(~Nupc5A_Np`VQmnaZgX@<- zzH`o7Rc7NiidhR=7Bee1*-y^T;V&DU(@K^-$rPc3tA%bvweam%_;DOoq-~SGo^VPZ zN}i6Q`o%iD&E+V%y&6j`9dnQx5xgTDt}Vj;oVjlHFCX_}ckY&`{kME=yb&2ch}Z4i ztX?@_*MHJ6uDzpZhFN^rJEIvTJrHrz@6q&fXEbDnDT zvOjt$%~QN}m+xf0bz2-fO6_DOU=J&4m4fWcxr6KSA-+ABTE&FA8r?FqXmQAL(F7J4 z_K#))GFNIhIH>N(zppQf=1LDyr2sN3C#Ti1grr&l^t4V1F)^HFU^8Rc`pTB(gxFE` zb^MSPqfwYbG6SmsGs5bq%Fut(H``bk0b;anux=NE!xTc*Vl)pz)0b-bG1w6d8(_z0 
zg1QZR#^QGooMkX?pt=Tjk3!gNAe9BmH~1Sig#=A0Q|UKnw#Pio2Hv{~yWm%??`U>< zy3{(3kviPV7`BUa(Kd)vIX7}?19B@HRn{abf9b6fRTR{z3g&E@v8|#EtZw+t8MFUz z5V#+!rZmJQ{*k6s=hm;dUnMN4j9+lL)b`OPvazdN-_HNynazcGyBF}oJDmZMTDj*& zcxb8cwUg}nd1rc#4_->W?9Dxk|75bUWi1-A)q|FZT_agbSk=uBK#C{bfCH16b^<|! zkwwWMLV^bg^?%pbvQ)&6XW=ILNdkVO8mA(yiZT=mQT~W3m%y_mVu>#tjAJ8-GZmxn z&&S1WgOmSIkJD|@$51sQ|09Ff-+w%OiF!AuGkaBOJdu?eUi{}V?FcD1GL~X|>Euj* z8;^ZPIBPImjLC2}Z3SQ*D-&f* zD$sQ1MYuGS(8io47QLv|#a${Z#hL5%bm8BQuMz7)Ex=(x=sRytMC+U=w=>a~qw(qk zh%Y>Xwh5*2o%F3&-UCHn@`Y_7SQgzIL_2fU#b1Z9&42ESx?*@(sRl1qYSoWF;$E_p z`p|_;W53FZeGQIW;OJ#HH&gJY@vodcQch!O8Z_}Iq!KQITo`n}=}CQA$|svfLCfgA z+&{+GZ1_2-U$~0-UN`VXJ0*XclvWK*H^{HtBbbI|bv<}~Q3Wr6Od zt^9&vH5<{nGQR>u9vwR4!%9Vc!nHUA(nB%oLr69jbwlgrZ+E6FW;^Mu z8&2d$wp+zs&m*}Jh^-zx_y>1}fKCacVtS5?+CNZ!BAIAH& zYyZFG)@)wVB7-0q<9PTOw?$J%v2anRGKSP@v1+T1yeuhnTbs?D<&_8Ck7PBnnKlsc z622^3Rgag_n&I;$ATuR3?eJt(+un~yQ)=Njai+Wy%o@0ZGzq_skwX8$pGI(6B3DFC zlg>zJU_vyU#p@t8R0{UtZ6|~NTNi1>J6JO6YJuw*aeu_9rB#c))ok~ST3HX^wIE)` zD>`G;KUu3=ffA=~54nL8{&?-Ipb*qWkCjId&Y7H`#SbiHm zn?D;rG$~9Cum+anprm?lTVt%jc)tAtrRhK`(M0{ppL7!asY%TSW?eeaF90M;GoMq#UVsaFF%R0jH(2~iNE>r zn5fSENqj-=wwTzOV!(R(*~|6HPci!G4rJMFdxV6!m&4*GN$AeXptG29hF?Ef`o|mA z%PRl)hM5tgPp!g4;g7qWWdxUZ1KR_DjmcBhC3FIX609>+l7v2RqmJ`9OEYlX$yYmf zi#mOP<{`Xtx$VErIWaDn{$*582CK;J%I-MEr$E?Sh2qG^qFWT z@aSfhqH5Y7Js~I&RIdw{AfQulzo35~aBhJCh9O0qN`BZ0a{mEW9bRjGX)g+tP-<10 zzg1Dk?H8_OQgOx4+raFFJY6A~PAVyim@hd?Y(+Mj6GjbjsQKy1CGm_qQ^FzFN%4jc zG4ddm)qBjFPsS!y>jTWoA>XN!<<|nRC)6YKtV?1^Gkq0EHx`u~HdMn~reMXE z4Y$rq+7hLfihB+#w0>k!n*N>8C!DEk*(QaU*>#1)GShrn8`y~B->#f@ zcyhq_X7A{&9j1n_V*HQm`Wg;4SL`E*%z6N>dNS;wf=r(J)e2N;`STxjxH`^Pjy5(Y zIgJXIauGJ{nl{DjT*Ruo9W}01j{~d5E1Es?+4~mox9~kuX^q#^Pi49d0QGAeeY?+c zokUFLMN3QJ3AT|be!vClmhg#)Ge{A-CSwesi4}#(3_qXrW#LM_RD+D#VDl$YA&F3` zm^o2-V@7#@)axAin>-s#YU+`jq0oyN{wPP`g?v%Jv%u8y=;@~u2Ln(ibsodf%j&zn zy;DZ7$1OpD#bsyxz;52Nt??*rGppYav8Sht#IblZ1crEOGFqtj=;N_M%)cjHH^Pb? 
z1-3G0J46V?o1~&$=IdE)IuL&{`rBg$>Pn;MKKo%)McV${s;mY~r6vq>V$|%qgBz|M zw6A`2Y!N7)Q9BBE+e~SH^H+Wpd>jJMA`BLN)YuC&bB;|7nTy^-%KYBfH zAk+6;yO+nsdicGzYX=*zGN)3?{!MA?K_pRIGYbLM+cl9Z4I*8sbu^#=3ygJ;5*{m<~a2K~gZRwBE{_ zI&)vK^Kq)xl>`mKkNZGwLoSC;p^7iGr07>$ZNaKNC9Mn1Z%V-%i%D+~_{L`nheZ?m zD4puxo_8P?*#U2q0{4nI^&y-;3i4LG z-*sq|mOfftc+}9cr|Y>J;0o{qZHi!V9PkKf{u*&PPQl`I1+F#^{s%VeseT7E7{(!` zSjS1dxh?wpn#0LJeA-+8Q8yf;IaDz)cP~&KoR;KITXw=9KoS{PuhC&va)@>t2T^QFL`d&_Os3dt?$Y)e%{VA9QcoHtUPq+Fy|$Lz=%%i4_`9Fy-Xw8OxpzrPUA~5_$ z>v&X)56=uj>G{RqgMI9D!a>64fs&(OfJLY@_A7l>%aKmMtTnb?)=I*??cNm(evU8 zZ_$&y-qlhhj_##a3rpV1kJaN65Svr0SB;fNFJ%)YF?99Kin@Xx8o_QB@o3P$I3e%Z z>Z&giYtM6chg+&iT~17LHI^xvrfN*3WEl>m5ZQL?oA%)e>VSCh40`DK8fl<^2M3e) zZTo{3yIsa3x4}ySVDG|x>a;~UWoATtFW;tn{uG#)oMC^=0+RD}H+O9HJcwC}~?I2a5nrj}xRRwumS zy-0C8pJ{DX+hJ~OvbL7mP6Qi)Dj8Osf^YXNQEYv3Zf;e?27Nm?CccCMjFxx}mO^)Q zVI}mAk3U(8hz$|#&apBdhS;2k=wQ26`I)jn6!k<3N#yhnp2hUc>F6u3k7A~G(6I%? z*v5o1#PT@~=6vc?wz=UkHAj5X#n6a2nRk{CXS=QyYG)z1F3Cb~SPzHSUPCF)S6@;e z)X&d8VxeAFm&_bltyJNMozt*9*>_)MbP}bz2Ep_C^-ZFh5Oo=$ZW_cIYJmGoYNz!b zW|Y6O)%|m%M=mW$+IZCWBjxVz>;!Og16Fq9Q4yD{2bJNn6-|@l;+MXN`L3s+ro6+R zrtJK_-*-NJ|4u=`^bn+$;>-EliQjlBP^s zCf6S>mt52bFep>HT8s6W&UPMP$s6;}fNlrRE49YUjAd8)Jf}|arx$pR6MzQ!9J}0! 
zROs}>@Z-ceW+W+hj$V)2htTVceVmH3>!wj;FnZM-Fp>M?Paky?5VP$GdA+TL^5j=n z4J7gK&Dx9CU^|e&Pp6D|?zQRCU{CiA;S{^WAs>9+lyomZ)a7Z+FZbFM!5@|st~v8l z+u1p8SP2xdw?C4#9##Hthz`1vvF#qS`RPTp>6y4ud#%5bxw$-F{J-G#A(~$IShdF| zlZ$_ez=wj#-45h*D<6ORQQh^LOnrM#3?rqAQ*ni|oPq$#^bKx|6uh;vq_!ITg|x+~xF#d#himYn z7*8vRfw}xBQG^sDf^gAt(Sqq_!}Vxk%Fz4)tdInWY{x@ME?XCFOBXIvUhIPz-H}R` z!Wm*rZ+Oks;vBJg(yh{_*Q6#W}+YHtmPVHXsuBnh1j6%MKq23 z)0p2>QIXJilJZWa-Ppfn27lx77~2IKr# zM5K~sVZ>NDV5~{mTNITZ&B3Utj|hbWA@YbW+pnXI5TAcbD8W@+f}t4PFU%l1&1ayP z7biifz7OBw(;jY#Grji9yz>$ntIM4?_`AEVP+7GHOv$!o-&_D{RP2w44EqzrxIIc1 zu9_=Hw9rz{DTDkZZ-|OZgRlzJ=zgr*=zyXWbFT{rT9QLY?*z4+qne&&OOhml?lAEQ zqle6w2wx&UK=tnGshddUx`;}<%tu?-C9h8|h4)I!HA$85sOHkPtm2fyDNdX7Nf;uB zhzFn>bKs3uazL`SQhoh6$7spSExLuZsAA*TvDMTRZl!;O^dRpXBJB}Ldw)YSTW>hb z_9HOhvv08H76;X%H@RlRe?Q~VJl1l>FEt7QL7c2U`lE&b%=DY`fh<~RECS7t?B_UA z6rXNA_I_}9t>q;vPJWVXg)JI-g6t>#zV~sy$_D$EnU zq#Fe+O3*aG8qaJSRf#NRv;xNt_h*t8;^t5xNaHAQgSZB;XJy-F?IVZ{)`ItvU|O^L zjjTsh9lNR&yKm}17GjXgZkap*7Jo~shAs+0y21&rLi#IpHl0Vke1Fgl`m~QPGjY%r zcFB!3dabVlgOyTEbIXhx6w{;lCF`fz5uLLrOxl9;(xtO7Ob>hvyi0W16=K;aCaSSe zI{~v)?0he5voh%72Ss@U1o?AdkVJG5P1bXR`r;LLX-gbGjO%x!jZC70AUS{%p7kbW z0xGnG9sZYg5GpYW^>!7l#7jz-2bi}IE0oRQ;f0!hJ12Md+v{;HJAZDDU)#rO59sv35*??|4Ynrz+Nu84v`mmx-?m`Xp@ zVc3o5Tvuk}2g{7c_USRmi|e%aeI!<{cn#l{_SZks2>Nklx-Xu?e>DCtV&Hf(`7wIoZI&A(F;pW6UP^PE>`cnri;oEdwfA? zDrBc+t5H5=XYSk?en><6nv(Z)eKrZ+`!DneEtC-m4oIv_Ze*vhA1lG zN}t^cDxQv(+!H%`IUw|Eqa)H7&A(pOEiZ*u2mI>pdsr4!>C1vmO~v_U!SX{=bTM}W zwFBG;modXY>ndOF#WN^1Jb$ekKX$i`+v7?DhQ=>Kw;6F$!DSoaeBP(F$sE$cV-?j? 
zBy%FA;)N{^%KCL=Jn9s=y2F4ap)o4vr47J1R2WGn^(qjl(*#Xm;}#YPc;H}o6_LrN z#ruP62z*+2*D(^LREUfYmjNh!up3nSTm-bUcI=#%FEi+&PT}nX}$@e>I6?2q2Ln6O9|LH6P=_ZtFN-&f6}%!a9-5a>R-prGm22 zBkLn0*F=-(mwG~v(8P34o`ARdY~`zGd54q>0Ztl0tyx5B`Hwy6v!!&dq<5LIsbtxe zr&mq1UFQ7!g_dx!D`uY6;+ec^);Czs6Qieg%lHOrpBBURHM6+P*p-ta;#pqw_O6xA zza_ahD!sDIottpz$heW@nkdo8eO#}HaBhTT4k~oPGTrmj=Wmn76UHTH>4uy+ueRec zN0dF^_D<{GTMf&+psDK$YdcU=X4+Qr0jrRbK3xw_V^Hi1mMoKpG)yY+EV-x1BS0V< z18TvMfsv;#V1Nu+TK`z%d~^X3^sS>R(aNnsce?3_0l`Q$iC0D-#0??Y(^8o6zXm{7 zBT-zldm?Qs`&_XgO`OjKN5;J*&vVz8#0bTEyxA@r27&PVYXZ_LVFV4Cr*Qphat>-t zV%i3MK7t}#`k1f$-Emc5)p4iN5kO%*2?}Mvpu^yLxD>W)sGuiYFqkJx9oHLF zam36qOv9z$j-jjt$t#L85`GmyH-7~b785-yPeGDmPLJo{m?13er3!)Fu@${xc^?P{`?cCd~-H+p_s?dEf>jorF1dPXXnAGv<4_uBAW z`adJy&*ZZ6)tbTE`~SP@3C9J$b7kb3wU*EIpTyO@&*k=TW$c#Me7)EDp*JxRKiJ1f zR57Bpk#h9N(;z8oOOGKv?CzC*j~klc)Fk6BWcG^7zy&-x@TkWS%kC{``^y6zuEi?6 zaiEzIuT-AQ{d^Zx3+W#p=keT5($uHieno1&wa1E?zy0{L(73JyX zjpzr)e7u~<;3r-%#go}IyREH=j7URTOMHc?B>&#WDCbRa9c^$HR*ZNxbu==8y7nWW z6vj%|IXIjS<{+#erMUmZ3CKc*?CMfV;d_Y;p6owXM1ytc)!Pg1=KOE*kz7*d4_o`D zANRAWlquH08h~wc6fvi!Bf9rZ^dh}hI+^3oNjE-z-&%SYhX+`{kYCiD^pA_QRVB2< z-qduDp734bMVdnLs5y!_k?}Mnej?QB3u?zwkH=^2PQqNjq#FY+riD2oxCR4beN%t& zqDUMlw7?6!>&MiJTj8;*Q$pjbgh;O-A^8Px7P}36_4yc9iobb1Ny$x`HdFF5WPu*@ zLbFv>l8MUh++Fq&MxH?N8kMLKZEpYG1Z#a3E8Z5vSK@s8(Ut>=fD`pl=pim`1me`S z?U5JfT<&0qq9W_NqC9m-NphQ3g%1_EDlrB1K{wz69JZ1WlAXMvKy}qD?Vp#ehVXCG z-q=m#)>@! 
zPwLC}O>@<#SVV6`A>0@N4!m|1o97f^+tvS4f3DL6&yMqTSa$B;7JEpavC!#T@tdC* z8vCijbMWzTeAf~K_zZ<6n0cTP+w2`j$XW{nq@^RFtu`c9o_f89;p+XpbFH0$|Dg2a z8K9H0U90MZ;1f7HjVv7yr`6Lqcty1pei~=wF9*CLnx0qXd5fNB?Z3NmkRrRvoX{fJ zJksM3NU=3giBK0;H&CZ{rs`uFuqdU`p>p@t49G!!K+w_O)2(Dq6?hGx<)wRO&o zC~&Vbf-%4NLM-IWy6%+S4hBZkdQDQ)O4BY{9E^TQ%I6V`QK1Hr&cxmqIb#Cz(aSg* zIYS!aq~g&x=Q~p3JpJor2&N(;1osK<384a+k?}+ly3T#3eBW8QEdx_pee3*xoB~|y zm^$p+xw|Uw@4Ta2!Lg>q)w{tNC{zFQgs+)uIy6Z3|1^sqzbh$C@Dbko&^L#1Pyc1v z9p*lwCdNEa>Hd}`6T+HD+jA)r{$+#1xfjC+|2tBaLRv1N7VG=holl$aY|HC92aMKP zZ~C2dRPngFZAcrP#qjpRT*qw9afZ%&f}ND`HJQC2)VDv(al8=`YOcKk#N=EcUz=n1V~eE4n%G!Hs&$;<3sTlI9%@M>-T)QQ;-UU@HCzNRiCmsYcS_Sjcuyf^)K>3 zlR+ zc<=4XnxlZI^(j7sjYs%$_eK|pSZY$U6ib!Mm(vDWt&$~QE3=_K&cXcN-Li9)AlB{+ zl^`?9e9xuba=Id2)|Wr@U3wvwyOY4U$wA3*ZkZ|ekdOXBlv4kXgwsyFHX1M22`+{7 zgW(}17I8y~yl7)Nd#ceb3_NLT)Kue4q0Dszja|#wHnK}++EYpnfV*g! z3t@GafDry+|JjoWhO^yK5dlo@G(}`r0)-yn09;0M=h?3q>e|Jzn4;cnL^}U6UO^$x z1Q5t0hPSI)4c^#hl3^JD1-VGoEKbLgrpjqV_Wq`RSf;(S4B~#Gu8{(v1b423Uu{CP zU6>T0VAk{weC>D=etK95hSHEJA;4XXdc&atV~%Sc2!JAXNT}a@Pc5J!lX0v*s9^Kx zk{SODG}ev`-k3`t17inJZ?>St&|%BHi@;2~tyxpWP=V}?`lu3@uQ_Uxv1P%IrC~yv z_APe+a33}FWq5z-bS|wosTu?sTYI;WFvLe9xJ(_1)LBl5qvSWN?V)7p(Hr*bs zQKJkK1o4%3IZq_7al@?m)mAQM$qymALj&bc{?G{c8Y_FLqkw90zyXmj>6*IvhH-JtA6^c$kCYNhcgLhSQjXmbzB=eBPcc;;JHlcW zTQ$p)mZYT&Z60vHG4Lu4* zjX|XsXYAYjA9#y@75k2jVS;){?>HNGX^>@h0afI8B~%FoY&#}A7s9wr+1|`C-LMA{ z0}t4)x2=h3s*S*){UNs@n;=hogV?raw+}dWfZTWVmNwIB3uaZCq7dGEq;t50rl+Z9 zlibsc@R}vU=9VyA-mb6nv=G}78jqO`_d4{Uc^pNn@+Shky<>*;0r8vjge|+wnTprP zq9Pv#vn{ntg>GZU3`*mov7I-iPI!xjuoRn^Ous-G+71YNd)%#`s^-3PqrOjskjXVM zbEq$8?q!RVF-eSP5E0k^FRxt&Sf$q51{NQ`;ik=XV`ed~B~4L$+-H;4#(KkH!h!}T zYd1A#Uer{PW-z}7rwMb3IpeG-6<5G0 zw>PdHjNe{lG(uHRbcPk0{{t({*X$3fB`U5N99=g0p4QSi+XAg7U2pAN;Tfh zM3>0*DR9~RY*zDB?)snenH4>hl^{~l9SDO1I^craS-d?`He5JcHY>7FcE-NQN^CEY ztB&V)!nEEe(1{1G{gd!E>&VTa9J>TU$u1~9mvTn3M^*)W71zB%CI(&0= z-BbFmEQ6LE0%@r{HP|087I}05M<{bf1FTHuGRrAd!%7`R3pz472XILhGrNO=X#$KV zU*rmXg=!PJctYffAnoA3mKfL)TakG=wu9A?4uv% 
z;@8_4I3$g(Oekk;99kN10d=d$Dl>$z4cQd56kg@!xEIqAvXo<$V6+!@x`H5i8pzIQ z9I_~tVY1wI}O1V5DqinN8|4Y%mDhQ{d7Bxk3SqfuFXmB+xOsgfi023Vh6 z7)^y(s=!7A6|`c%m2epc#mFHFQyETTto8IFV>^yt(lK;$Lbe+rUVvK0XgFR;?f5*D=6a)92xWjRJj1@VIW0` z6eWw5X;|e2mH&Aqw|NvLa97g0z|!Qy;G^0Xs<^65WZ+Toj44g)n^V0#CBQ<79RbRd zPT^3ld=#c+zo9hx#D{Mq4st^+D*s23b`AaS2bMxqfX9GQVk&r%hLax>E-A3tlwIPC zH(d`ny3~9y*nyOlIUp6!g<*%4#m%X5JDXhhhI*#gE(l!MVF#kv{;qX;6?)9YP{SUq_N0LBBs@MDnQYq5{27S)Y@KT0w+*pN zo-^)L$-9P!SK@{)2+{V(`K z-d@gSY^N?#EgY?U1~)ous+QA2Cpv+>pk}fxsx+WCS=mXX%0C!FB7|i)iPdd1wIYc( zm;ri}_v_%un0oU3+f4#W!IGIvN0D0Eiljyb#l@`+JW|wB`+&wEdAWs`#G+K@vK?8} z1tksZBY9y1>r7BP@VfIq?kcKs1(H+UB?8|=+&`#Z3k{3 zRfsL0eS;z-s|Vyy>a~3cNIok*zo)1|{U25-TmCy*tR@EIwK?{NjxGMIM`&PQgl2zdViqRs|&%O8hd*@ zuJFGa#>vYDHStB+-J+YRPp$zDDlDkZ%IldI4>GumAK>e7o91#-@E!Li;c3Q>K6J;+ z5ZtIddEt-fT{HX!O6Pd*9|u=yp)+Y!lK?g;nr8elHa??_QA>$V_XAxm>NHRCkC`?PxJDu(owh?%neAtKu-H1*&DI`^g1`Pb4&?7Q0qS`v$} zr2mVxa|#Y6Sle}+tk||~+qP{dE4FRhwr$(CZLgSr_O5fP{yNv^W_qft`f8?XdcNoF z=L1o)ymLig30r5=-VmyS0^vJ&-^Z-*WFW^m8%mWkzdLw>|9%;WQPg-tm2O{;CrYO;yV| z5|Wl@(2^_$r#p=Qm)1q8glAyTnUd%PGG{HktEG!zEc6=MdjH zl~ogxTsS4*aJ&YG;}&rT;>i*7Pa`cR(O9pwV^wUlZ2Ah_*^0@s!6I?-x};cU+Arhi z{})xG`3vpvQq4nFl=yF=Il34%UI3fJm%W#>C0d{-IkR!Ive*!*)l7}%Pj})-YQk|H z0Ug_A*heXn=0eEz32WNCgWV`97O02j3zz|BB41Q?_PoP2R>XW39z6J7WmS;NDlm|# z!;HgL(PalU$5PkAe<|AI|AW@TjeaTG&BcEhTb~Y(q&_f>M^azOFJ+7SFM-3&2;-5& zx0w3x+W){e?hl;C6wWMdIuazfe^uvLQoIq7S)Jdg6-Mgh;uN?8w1lk z{-7+h;BH~U=SNz6z4$TfP370OS~w89fUI-8B98GAQngB3D7R(9fhVbIHt zs1qsGl-_X~NreuiY7X6aWRVmBLy2dK7cI5>wbVW>L-D3|3qvtlp&tUowVljS6xQLz z(88MYpo|cL<0K%r`;xwD``&YymECIw7Oj@voTz^_cu!F})$7kts`w?*^HO1(MwW!Z z+G(`rv4eoI*Z6Yqyui3~jDX6YJ`yKVIMoXF`)nZ2y4e%rdK{rA9T_9mI!aNQiDcE6 zbw~kGT?*b)(9Alh?0KLQW*ru;?YV&3z(OG%B1=yYN(WKUba~pbsfEHvIw2YqKo&m9 zimU$d?mc})A_}O}ZfaELkC}3squGG%A8V~eW9>N!y9gneu49LQ{PSpFfK~lrh~Wq} zD=EYt5?fusTzMHmERMH#6m^>!*;=2Tx6A3-(o;NIz7{(FmJS&Vn)5KUE}6&LwUI+K z0qmnUi5lZ@BgJF@Fa2l^OUqdX^SNXsYi4!@W%OF~kt2oVI>y3OdpJgYhsgBE4j|ai78et?E 
zomK%;Ml&IYV8TC_;Uc}F`ur8>^|26x1V5F_q$f>w%@Y8f^L-2>mu}Kf-Odap5(HH2 zeCnWN?nnDD2}}ghiz!#D03qeZqVh%&Szs^{2aK;C1Vhbut19LgG=Ryb9^LR2&g7dN zhRopF@)Fa?e^Z$%Zq;MhwtNnqSgsr1I^?!Dd}1+Jk`3Et^F)&Wa8j}w` z*SDoCbV1o=8XNu~S3r!IIjzO?L8rs%@AXkKWSn-9-uR=7s2nK!)*2`0WNpkmyriD? zO`<$7Ac@id^t51h!av{PU3Jk-H!VX^=BHyMz&Qz+=NGGiwc)Gn>4Z6t5CC3Lesd?7 zN~YifuxfbI)i7#XmG6wZ`DpHC&8`!|*~UJPdm}tRg*G?%P#zY6MTqpo-Jq#_$dtT5oCF#u`LE*5Wz) z#wECNc|O);Djz(`S;w66uu^lVPD7B$zU)dK&LyzNK1vRb#f#0q&Fw&6rOnTp{vIfN zK*PD5GnrdO^9Js`rFqwA7F|i^R;$}QV;r3vK5?8wIL(UC-agS5oRfJi*Dg-cx(*Ht zj~;}hiX4K}$@01n>V!#XCqo%1)iyPD@G`A4rdxQFsK=P~WHuPNN8IvlWwGkDRyo7< z7rQ7wy9)lpSjAMvXAMxqqwDr|7?^Itgkj={luiboCVL;U@9TTML)kCbBjofEs4G!| z+9T391BIU#apkC-w`60;iEKQun>$cZ^pWNtYKcR7KM!CuRPWnhD1>!lN4RNjn>hsd zJJW5>u}(Mc-Nf}VCS>6zXSVH}M#0j?728geDD1A7jKEBVlb@})N-)91bR zAvQIqZ{{#ULPvw8guPktzmVGREl9;+03ydJ6^jGc}tgto)YGJ%lR*HFrR3-wicWe zs7G#yDKnAm+42zKuqmRY0?NRM9-*WJ|ADu*arZNss?c!)4!xPm`%i#%@8PYlsRYTT z`S`5IA~AH)Q${$g1<2*|UH0!8-@?7wICL)-_TQHXtV$Ed=sc@BEt*5|n3OG0JTIcB zN!60@EfQrD<05vfL#AVt*j`??@cqe^1+G?b}&L);@^y zw(?`+9RQ~?nL?Ew4Iw1>n$+PG+@z_}$2*C%s9ZvC zaU9mOHtKRzHq%&l7IW1I){d`coA6Jeps6ddE1zh}>#=;35OGng=fWNMl(_~Nxqrb+ zw9VVl%A?)*dHw!htnrMURXTUVlsntU>zX}JAa&v>Ad^xYs;am^;+dN?P3<5iR8rYk z1hu3jX^S#veV?FoPAc-;c+S$-Zyee2Ae}|`Ng=H-DDbg_=qkv0NsG!W$&(urby4)_ zW>Fe*OHeq)m;KR>+i4Zk+aQw=Ii9|QfLSMPt-6W=kmn~xOY*X8Eb#+ex9snW!)X$UeU^*(YfFS49!AsP90>%uboUm?F zkm}PM>SVNcE#XcAT&qHLbdE7qST7q$pFiUn6U&J51Y&UmMl3G>fvet!Xwru!BMojKv4=E$_tCbCPAz>3R4Jox~PZhPeaUUwyi^E-MJ11JsP@N^BBJZ=P>K&0U8E^S~s_R$`<>WVCo3voOy6pmsQxw8}d zGSiNE`@8is=Y$^q2{4oM%qV?|%!zI!5CxEDICs$`UJL$7Nmzj1k*mm*IxgQf(U!ABMgsTO1 zQ!+IDm8oPokPjL)_WO2?;=^3LAhGJ?OX|mv$Z;jDe6y}5l|OS3AHhN#w0JB~c>k(k z3~tj#O^n(@Kq;8-vhtr44WQkpM`*oo%zn8&ML*N9f>Al%4bAy={u?3dZ&pz}ffCnh z)MDT@7-mxMoVnlQ$5-&xxNnwm&#I>Cu9c@G0I%8)I>V-Mpuhz^e3M4ym|dmKMC?Mh zf=?8Mdsm})V8PzTFQ|L~J_Z`_GMci|Do>-F-WXmm?!4A+T_Vh-mc)p3w1s?EH$J^D zm74ZBS@)b}f)MsUoi^St2*y}!_K0UKl|P2cnaGVVK%g5EJU;Q2b1GsiU=m+cicRy9 
zIl-6lKxSVT2Y%^CKpK+>VlBfxK1=`YQp)nDkhnQn;K?H!8*?5bb|`7vjeNdtu0Y$a zZk0=R3Mz_G|Q)D&>sBX$k%tH@t^kGOuV+)gn{be{RFT_TLgaoJU$sjA7 z1rd)NkrU97=9I{$1i}+9`kQ~2CQem|Aay<8*d!*@2d?d!I;D=ut)K`jRQ0gTeWWu? z8a&XyzI!sz?jTiez*>!5QS40L_G@8khY26W-$>kwPvmXbYoIr*aV;K{@#}1$zF#4z ztotJ13%ew8N8l;de*crPwoOR6bBC?Rnb1+b5ON7!;2*qejO69P_@ZxK#DRBcV_k%i z1w~rLq#CKhA(bX;d3eC&{y2YjU(pQF>Ni6nTtcZ9e5}%WG*tzU4jR$@VNY3X(&;t7b^Kl=*Zj_DSDTy4>AR04Z;&Z={lI0%; zINi>T&a=h_O73diMhzykqAxge&j_41@XpWgkb5tM0*tT1eSpvSNf2gkdCGFBSUs8D zu5=&JOHT^4NmjPA{yaN+@-ErHfnLiGWBpoIRuou%t;Tu_$%w5=x9iSHep@ReRVP+-%r9^{iFqzzE>mN$z?9%=+LzF(7to5Ar5KAo zF)foFStuJ3&gX>$4S_iV;T)r-ruieyEX?x7?wCa$v4_^41UjRc%x(%AgK}yC z3hLULfDu&+%HC)}THw;N?*p39;U~t(OZkLjP*l&^*miQ^4O|0NH3Go=P0IBi5p6mS z<01Y}thLlr%34r7yk7_21A4Z3bX=u2^+eEV$PLHIWS#GvrS`erBVDClO7BoF_g4e& z7dV&3IB`5H+Vr>1coQ{#L-iC(Wd2?#JX}UK=&Z98zx*ukq>4t^ruNkFDS>5LPiCnG zSnMi9LS*ogpC%YTj8x8uK3$g4W5*IIRIrmjp3RxDbc=2>WbJXAj?f>HRRytcN!_;Y^zpH zPIU(!+=hF|f+meXIYCqOA{Cj{e&xW7=RwDQT3&5uk)g+t$(75jM=x})GarG=7U%W6 z@sB9`!s^4E0}8;U^-#xnJFV*eEKEuzcQt%D@ua#S#E!+%%!p6wM1a$p8WYfNJ=M8F%Eo*HZ)>dnmj|RgMtI5w-`c(@8B?li zl4o!geq3B|h$01!UDcFjQnM+w57SyoeN=iu2&>0m@KlX6MIW8eTh`oX`f;R?<5w?^?Z9V3o84?h+Aie={&niGAR@DFilq;0wd6V-A z0Y+SlS$|}b=+zR`lF`@q`u2hK{t(`9-7b!8s-De!=!FFL8@}$MGq9PUJ=>lCh}uW^ zFqME7)-F$+-6j1I^p*by)8jjmlOjnZ=45{+250E))Kc#eYqLO>4t6tv4) za7v6Km*x;TFR9WnT`1!>ys|Y~KCbi((~=&(mGYb>p_zlnhodBK_{}_?OPV-7^tikt zZdJaSQV~!9hLuDgS@Iq<(pSauWaF{KnK_8k5JJ7(q)uyK^S;M)!FrIgnpJ8^n)0bY zdH4I838~ADvR9|3llu4Jm<@Ch@tQ&Q`w?KjsV+OJ`e*eCP7wxkcWn>~M`!GvUGc9+ zWs>T+%3>PxWjA1Dl#}=}6aE6yE%Gx;6GE+s4^pYWiox!SuDN9J z-j5Y5S`v41LJURW2PwLA-s>VRSI<1lqsl6@w_=K~fJ~+$&7)V&c!J=yoNoD}T2s_e z8A{+`!rX3Ydvx`x-I~4JYW_pWeKNF} z9>`LLZEChiyTqZ3@-}&+kKBHYXlq2QL!Op(Y^%Zz0<zY#4eFO()z5 z4q8!XqLW$J|5~D}$2Fp0t~1c(lUvau_Oqnb17vNpA&6L#sHc|TBN)Q%bop@KR==&T{x?=KS<$G;dNcQwypQrW{koh2s8zh{`!` zfPiz;8e0$aGu@p5qJ@SoyWGC56q$;Np?b^QDR=TnqMwiA8e_HSpuF!>b}H-#>}?3qzNuWrNzkfUdg#G| zWKXSm*SfVnZk`#tB_guUW^H&=vv(HQr*+ehz+mzWNqgl2aI^syS1Eha?=a*Iprfas 
z;%nBYLqujpZ2ko}cL@dH5xoHb_^)QLx^+~)?#a1a-8^l{eB2Qig)sEJr0i5n31~Wr z#y+)tO?p)ESPKum5|Rd~a!jrDPS%)t9TyU8f}%|DNq1Q2vy4EpVwH0JA~CWes0Lfy z9>?By9Iv%iz6lwenI2AAA=|V}9EM*Q zppaCQ0*it$N861WthS_DHhbk++f?GRwg$2MNjvwiurQKbE5qVSi-8rK=HXx3v8QMe z$YCOU(QSRXc|XsQ9FR#w*nEf-c&f(XFI)ux4)2jz5*x)0agitrys}|zO&(0yH6DWc zY^?i(XRyG?aSFftf2^{7=OgILSCamDwTr&3#bv@Qf9(RSk>z5bpJ;7t?;@3+VO4iC z)ky;gJaiam}(xAJtnK#Y!LY=##ds^Ckbn|Kn zipvSuBul>ZaLSqn22hT1iK%`p1t*oo8E8%BS{qb3Z~t9_0E*(XMtD9F7`?5shVr0l zg5*Tgg3OMf2Ab6_l56X5A^VSBw)(dm>-l$Pz!ccMtr`&gbf7)}u3tjgRP}*7lO+hB zc4-oW2yZLL624Hyi9#-%I;7>NOK&m|eNt&31RF>=xqO1!lYCO0`Ss4PtA()b>+K_j ztOCG_ko|d%kl5EFa(6*j6*;BX#jqKc4MHDjJR=?i+$^vM{Ri45I;&E~FsEx!%G>pB z$8a$Vqi<~O!X|Ek&D0`*4fC_1I*QH01zr`6aNu~0373p;No%^mp~8~)&Q+T|v2My_ zhe)gT(S*xSkxZZ_xJTF|a&pLERIBUoT}H+ynh-_Skcdi4c9Kmc3@4sPHTCG$kUV4} zNkU^BQ5(?FliD$}HX5J!UFbPZNmR6~TEeEbUa}wS2d@C#9G`J^$4EO8fM#O8{e8&g z)?q;uIl&FC!!-QJ;TY8KLQy{svogaY^GK%E{P}WfbEKf~P_($IP6G}ITSa0U%kvTK zfkTvYFOCWv>i?J6E-u(C%h@c+Mo!ca(YzS2wraOop%JzmmY zA!@xOb*PW_da@)oTN`QtQfHS9FL8rIn|UY(RSI&dX5*zo8$I6c^ABz}m78wZYg;a& z09J!;OXc*3jx!jBQh^5b9bImD1He3hA-iW(i0Tsr4EugZ+w`*Z)+9XLs;G#w4n0UN zKBuw4~fh)r7}HTi`=V)OsZXOXAPx;3A*xO23AJWAJ?5oi(^3%;jbX2#1iSC{s`2=?5_(^JntJ#?Yrx1W(UxVTnGmPKf z5PQ|hXS2_GzUw90WIhBCfk&A0h{d6*Wai``Me5{NyU0v8VRD9b>~aYkgBg^Io=8Hn z&UhmCfgMc5)JXOtHvJIt+klO0;Lcwf!Y1?OpRhIaxTz96Y2^;_d+e6936>OZvI)AJ z++ofpA!Ojt=!3HrmLx?BkoGXxjqj$XilW=Ke@^%`|4ACB1CG=LtI`1t>BI> zcev7fVqdfbz?Ow-sTzOYds#HW~fg@92 zqt312F3#Z#bIV6+L0!ercEFkgEcuChzLq4mslRv^iz8+xid=a7)}TST1vVktV!5<+IHvyS?-NmE)c)j4~ z1T-ZdzP#EIk#nF%YWor4%|hpC!`JbrM~E4OU`ok|s2xyrQpChw*1yTG0v041akS4#r5AvnNU)%waegf?bK*mo>V?mGC6fTO$Ak=;CVX%>2Q+dm!$gk z%QygSBcC3X>&fwQo>12G{ry=hELg+M>Eg(?aeBKZVUL!)%TW@N84`(?7Q) z;M*Wt!%7RSy|P#?v2o(K?k+rF)3fzfZh|_q^LLCV22=(6`9M`jJN9RO*W+HFZmDJ>TF6=$I<`sDlq zqd?`TF(Wk6RGK3nU)%IwX)y>gMwJ_w z_FMDa5y)-XW;utD2jd+(Z6P32 zX2{(t5ObO0r6~JF`9byhs}Z-W4HZqB?jVp*UKXg)um{$2+#-w1f3SDmU3Tw`;`{o6 zJ(3Pb@(5f0X;+t21xyk1(ek|Db4nipLzQsJb#kGn2?|8c?|c298a`k77QCb3uPDhb 
zdIm801iRm2?Q66$)^=S4dl8koIW6^%4J^;o-unzb!$(vy<9BU!gfqb!%`LgJ;^+^_ zQEX{3K0HAGP2BehwnE-jE18L8>E*c++Q{D*S{pvk=PNGARO~esK1E3^|JN1&uqefw znP}RNXg@|}4fO26K>aE}wDl0GGElW!!by0S^A&71Iug7DS(wFY+-2C#2WoK>8u<*GcEV>_d{kIn7=?4uwV!mzH6P$q4%@ zZy;tSbDR2ZW~E;MhiariLeOPS-+l0dN@RKIq<2SNJ@cpdOPeXzk^E5w8E=!eC$DHa zpC{S?@udnW3isO%NRB9hI|={?#*iPf>aCP_AGKVFdC_)pv+E0ggVNtc!`POnHQJhV z{7R~$5MBmI$v!`ef>rBfC-npHqWnb$gtIC(*~q*Y_(ODxmDFlf=fetp4HuDHu#CL^ zjcsS3Hrqokh|5;dip4sP-b}6MYi7nm_UZ#Qai3EK{=iW8r;FsLkBjcqxUARI+WobafpfxxyF zb=7m@fZ3rqgP5RsqWDTA+}xjysLWy`x{$^8bd=nU=i%)$y$e(;{l}BqIQABdFyzeh zTpwccgb(H;9q3X09AeR0d>m0^xuVvL+0 z46oE-#uhBK1}T5qr-BjU1Q1^ac(;=pPIx;HK*bEC+TbjPU#2N@{9D&j8qVg?)${ry z{W57o9=0y5j(cTXY42sk_uMCW%apVFPYg|6=lOgmOy*-jP{CB`o(AOwH%8LO_}l@~ zFscjNen8y<0+RSd zdIb~UOI}qcAu+!d-;jNenGBdQ#t(0dwJ%!09E%90y6yxO4%g$ofDa^Or?JlDLMyRn zt0tgBGWWf~4m-<8D>XNkB;o3UeGxjSQ>&LOe)PfR+PENMna&1B!u)fpT=T;#Tbh@W zh}m87&^Lz1A~E$E60mZu90u6GT6>Udfj|)_Wg@#~UG!)LcOkL9QOE{^9q~|g*=r4T zl^2=65wm`2-4=--OZiu~wj~u$v%9@4!+L`2>AKorrcapfi3!6onnB_~>ya=V0CcQO z36&fQJ_g8-K9}BI>dL@Qs3<{5Xk`)Tzc#YDWan^i4ux+mB}xD|%crBK-QAm0bHGQv zyjUcks2CYe{az%>KP97&EG}LoxuUUH6>W}JZJ+X$HxCO@0vwS#l}tIKJ~NI!NkeFp zfR7WY9me!iw;IXu%)PLPHSmvdCRKyl$jvTS{;F^>+jEoiV7h=o*%RIh4A9guS!jLq z{VIYmARqAND~bILD#0XdR49};eCO!Pr@#x*F{;(d&;G7K-LMIe9ud=nDw*8xR%8qj za~CrJ^)vHcOl6h4g%h4^O>eY2wf4j%^O#!|Qz|z@VKoINR*lh{CDTIPqMp&oDp>YrrkJE}zLnzK)mZPE`GmM%Xg zW3!5e-Oz(NV4VS{V47k=Wy3{QEC8N+^`d|o8+ZX@sYkVS&Ui_siPW#O@W-0Hd7)bG zfB54bwce5q_q=eI6|vi&ip_=1w=9Vi499d6e{yzX{pYc6-OY-l-z_$Hx8e0}SaI?Q zw$da=UyoWSjcE{54lMVjB#C{e9Rq0}tZi)&IK#c_U*m^IIOw^QZ)FW@K;5vdMYVpPj+0>$#qtBbO(}?6%9~oLeMWQ#5GE&lSAhO|u z;jg!$O&pTJaYo%okvs_0wTClql^5xO1Dae`fm}hr1>9}WI1}wM+$2(&6osG;2(1wY~FEyh{#wM_|2hv0) zi3bZ{nraPRNn-Cgcnh2twBVCjuRbq=(i?Q@6x+9)$e2w~4vxi^D;;e^P@Zw>chp=y z`Q~L}+x<6Hc@3!%1<4N-VWI*{58`{x(Ma?6(KP?p_hF^GZwcS)R44`yjHJiUiKm4ib6 zLSaM|K0u*c(rv2k8K|gzg)+g8`u;5Z0y}Jy^DLC5<2{54lpQ9UT)ZDdQia-PQiJg6 z!KP{ZSZH$cY;fyN%xS~02_S7$Y+vYa;L1>Qi|t3HtiX{SA;jd^=h0K(zzaesH7Z|` 
z#5f${Sm}JpIv2R|(7p+PpRMt<`iWWkkspk!855LSHd#$ZjgThN32+<=wD_%qP&4b`xSm8Fm2 zr}x8u%>XQQjMZj8G(-P_L9P-u*zMmjz`1k<;@Hl*aQ}0{xu|Rb8t2s~uM-Q5YkiI% zWj>_9ssL5_aB0Sxsyq~!KN9G806bcXV}e%reI+@sE1E0 zB6&zQwe(SqjNtX~azs`QvNo2fZ@>PnMRiuE+e{&=x^JQ04X!&I6ymew7~t;*&yX(1 z0blb7EKkCgOzsAsFBs{$+%E8YJC@jL)v->e*Dd4(b+VXiT9K|mm3_jFE}q}lH`^}& zkw?BA$w?Q*N7z3|f3(x`w<(w6^wLp%(CoSb{tZQ_+U3;dNvMXro#Xdmj7eule+hNg-o{Z%ov;BWzVgZ{bnidKaPZDkSbPoN_w&Ip?yVMgcSUzZzH#(GJZ3Hi znVg=SqV-a;b)7bI{V?OR00KsIBG6GOq72!*b0rrVg!83OJWTBhNVOC5QwSL9sVXBY zkH8I~#)K)>3Ux*rH50`<O%*AnRU)ZQ+p-9w7i+{i2}uzH{^%LYH-DCZwj-yHoYz4$hd5sOT&E&1$|`{{=zEZen4I82QjPkO?{MeZ$hJFGBX0nTctae(_0l;JjGU;vjnR2r; zogz|9f-zF*3XjpY)^cy?cUcbMl^Rq2E>QIV5}ibYnc({xLa))`+CP%81}bT?n%wH) z25R1LX;4TbuQr3`_~7>ZCJ;3$RLa_mxKp6NtV%A<-XG`h4+r1x7c`AD5_RGWeon(|&rHby@P&J1aS2*7O18ftf@2&T09d%)ROjMX*As$yyU=Pc+vWKx<+QF{m{ie&dM*qggalYsp6tdDQGJ#zN$ij64h5norNLh#Ck{;D09fNy}oRQ~p z$A3i%4)VxLIic8!)0A|>vL5TGTa%cZ8>%6V=Vn8v?2DHwgp1(h+2U7yQ@F#_#c(dw zM*(Ep*mjxe>**gTh0t;@m(X8XHlFcDvt~MCVztIjfTNW-uklOy5E!FuuBonU(FfV@_R{=Tb}Ql%ddcV{L-84e;|XYyQPQk%-fZ zPk}8gMaXg*QM?!aEA=v!OF=y~#UuCq@I0FX_(nG~v8T9%GXAn_iPYawn*D1H9Hen;e1J=l83_@R3NbeLSn;`cQQz(*8o3usOC!4-S>N%P>G zsh<<5tg0dabE&{Vv63Fezueo~6->tRv53bo`%+}`c&eZ6`26#M~gErk5mWpL(g&**`CbEL>Ma!jl_A*donDQC73*@f zy4|5*qBi=+(0@Pbde_zblF1U?57ll^9a0WiT`ldYQmIdNITtiiPXkqb01+pmORDWc z3c>cQv03>#%f(EzEWOj5Ww>E-JIc`~vEdO@(Jyo5r1BpO%iD?M7=Stgx~jdQ zmeGz~IA%C`Z8C9Ex}>kyi#)y7c;A!Tvh?{<_ylLG)QZ@$6D6fm<`fCAvYCQ**$J21 zHl%+pe1zqEDwf>0VbJ->nGmkqq3MG9dlIXso$zTSMAIrmcZH{SmbT#Fn(82J+(Yp# z?WkvSg9>Js@~cI~z70MLnY5V$=pShf0`;sK?E1C!722*9R~8|<4b7w zyTac_Z)!?5lF+Syx~m%IW+2#NtS)oClh#SQQe11IuZmLYUbq;_my$cl>cOpsX@0es z!Obf$_=q!PpDaTaeMv=xV@#nEk_3j87gm7Y@2t9;cZv7|XO(!<@{L<9yWs+}_2C|} zLT7~`=x;@u%Bs1Y1S&M#*8DE8ydAu}opW>`bWP2@dLcEsXj_L5R`XnjGFCIeKt1a{ zW=+xwA6Hnsh7J*Y&llP|&%b4q1ICd{wlntUKd4=tf)~}!^4A=cwJf6yP6^d~I88M` zmIY4GZ92eD8=lpiXQn*S(S~S3%giko!&YT=3Obu*Jo_qaoFku@7q(ZTKrJFNF|dYR zUd}NW8S#grD?Eu-SP37;Uu=|sjh}34K|e$;la$w}g{SSf 
zT<5DwI7e4%D(ZaH=qc*ZRfvMBu~|IZCZV$IfIVtnaS~yQ*f*3Q zKiKPG<5f^Ki5&-8q`kdvBuk0F*55IZWhMrgG|eBhKNlyMFQ&V91AIL~^M8fa`$p+S z$W^CnJO_@Xj_S~^go+lv?m02%Z8j|ir@_$Xgr@fCorK)6-0}6)D>GC=VUO6WIho8l z2+{Y{lPIw%f}T90JV67CI^smCS^z2_I}8qt8jad0E3K>d{hqZ9ZaBd@@wNScWuK&V zY7RF)v8I&ae^2gndkoXZh&1ia8VTJJupTS1 z(ERRgMevF=K>q!7FZe&VQoEP@^g)l3w{X(&c7bn|EJvMt)pvi38K{!t+5D{?;AK(z3u4{b-l$Czy=LRpGi zC^f#{-zybFHGTD+L1Gi$9c;J^AQzrk)SWc?T7!sXdO8}=fIn-mM-(4?ixGn(j7Dh1 z1;eOS>LG-NseRNs^!65Y0ht=9OIIaNa>cPP7dvYI$*Z5!+WlTKubn+MBCcZ8=WVbO z!}COXWbKdyR9Mgo?>o#|N8TfMGi*89I9_HZwQ1P_H+6+zW}!Wv0rya906P}tg_26( z{C?b4@&55Ws9(716c&NIlJpCB$;w-Pquzl7IRvD%qm%c4qxNy;_owPX4_vmCm97ih z7m4g^JWlu=ROFF?WT9wI8`E_)o>Rvi!t|={FMPx5kyko_0tG{9vnAdyy{&=H%z1xb zXeTfDCvH6H_*`3xXKm8Eq zTSAJIPM{b#L;v^l#@w#}**hvf<(UZ`JW7?`gP4Nohi2~NH=|b#zpgsHf*?0EJ&LdL z0h?QGwfS5Ep34Y8^uX3?u2FGdBXw*GjyhGDe+4K3zOide`ZJ3+Do)G*iF3w9*T=d9=J zCU4H|&cN2GpWa#0&B_F`)r~-k3gqMS8SPdEOJCb49@V5fra!`b;k$eOmIHOG6EBly z6MV?T3Zq!S>C*jPW{k?G^;}$Ec+kZsmn_}&WrS~1_uiMCy@hg z{Hn*tQjbCA8Dr=e2P^NN_DN~1ikR5iCy0E4+~b*5Mi#c5h81Z1stT)kLMOnFcMZd? zyM3_deqpflgHf_FO6po}ygllnY@e%~shN+|R4w9*+Z%H>f~b0)=qYQF>(&GXxrd;uKVs-^sCu@#}2IreH8<~d$GRIyBh^-gD z;$8uk{n?(Ny)DLxVym%RBDE1GMxA$Kwj0BfeYJBK)GaQcCR<#Skag`B)n6!uj^FmB zgrT+R3#C)c1Y1dSNF$q<-yQVz7%f`lsC?$+Qn(X^Kdt6qlw#U(}(IuP~T%^&!$+Zese61N~=4rlEJ<5s58Ug zM^uUU(SM_^4m{q$Pp`d3#P35Pa7O<`M(YvP6e?rCd69j`wTRh!p7I5ricaoszk7db zCI(JjH&?A5fjj@}d#TH-n=L-qi1rW}ZJF~BFyCf6Q?mw5V@o}*;%BcqQ+0hirqcI? z2~fA?hu${3&Y&GH>4Fz!gJ0CurT1p{<^nVBW{@!%(`#1r-7k5n^rtcrIg64~7a~WU z81h`E?1M4~!6+$XFg%LXCNM(0YOYrGe9SY{2HX#R^B;kYs2uc9%q(ry-4?P~ma}Q3 zR-PJnPX5wjeLhRn?0m&dSB+9vh3m)^$UUDtJO=roj(p-#3S9jRz6T ze{orhE14SVuPJNvlxm%N4h5g2vB(m&HoJRsC-BC{UhX8(DwH?&UTqG*<>t@-*HDaZszUJwycNd6Lne-%bLw@Pk=E0 z*7+978V`kz)Ec@|i7p_8? 
h(-|CY{$Gdda2>A0^?SMgPXGV_|NpFBB%uJh4*Dc zVQyr3R8em|NM&qo0PKBfbK5w!Xn*FfI8Vwv+dUB_c}ZqelUL8}xYMpl9G9I=zphN4 z2O^Iov`K&ifU?z&?{B|_g{!E=OFNxg<_DVsfdiZs2fzU!2n!}CI*fC=&6p9}uM?^eOL7Xc!_JKud`fTa8(`96&@_ zdR6-BYbajdIT(`UnoLzDe%7NFZ0qd0N@j8&F$AmIPC_`ZiVN7T= zfH5UZq(jR8?^q}3T0HHwD8|9-E(yIr^cFZOny?EhmtZFr3%l12(F&uiAM zDi^-jh7S`efl46o>;Aj9oe>u?Q3^vC(Fk>h*BC@ZAWVs%WEi0Y#bJn$5GgSP$`pqg zPX#E9lZYskzUPf2J|vOrfmDS04Zs?G*T7R8Q>nyk0Iini(U^=)E`bS`x|f*c2CN%X zA-KS>vj`CnK#{TT35@LrNYW@e;SmjH12`HTb9Ew6B2%6xQA}xIs#3EA2XGLjQlU6H z(Z#q>1NeKt*E0~s7}ZHRNmptTJb;Kwh3q}skD?={P>hJSs-U}751`f$Lx~5vi<5yM zx^mYGd&O6s+x3&SATmw$Z~!9`Ni<0$45i7OfPPPUUb_tksZ>0s zzayv#LYjm`;h7Rd;dlmg1QL~3O)aQ@$|GIJ&*rmcfBCmf$H+opTxnsI2?lgDgV1S4UK$7sb%qGZR5xC$f!2h8GZ|2x zN&`BWa2lXdB!izs;ajyKk>o&GAhf$8wR^z=`(G#tB4xlCAe&ObS&U3cxTI4GENjiY-UN3gP zV~maJ?)BbLhv9!xl(ho=`2st-K&GAhLBLa{Y?(8IG(`!Q6Rjfx2%J^&ft>I(3bj?z zyigcw!(-lZh18Y~je-Cl8Ek7X6xXb^I3A6R>_}9GL>82`k{Z}^AQldlFh+)2q(ew0 zWn;^OG>Xjk%phi7T+J+4(KUweB#ojo3{(R$golu|60AwD0n6Q3tu=z7?tv)p zhCGnnfU^J-CA*0!*-d!Z$!ObgY1;0~wd*0#_1uLB+Bs`Q5Yay2wN*9QNu72$b&Xw108-QA9ew6Qw<`qEqrKDp9 z1jbs_GFL*O zWy$04vaTOqAD`(?b416fFc{QfEppr}*pQXokG6GVP)yDE*xH-EzwPh%yIvb45)n)= z|H5S7m(#${h!Ao;y!P>m#7TrMQ#|uSCVgAhm+5Fkue>(!#F5*9GQ(yTawQ#`4bunE z+HO^{V8~>zsvTv~)J~6;C9N6+CbOZSVa6t{CFR_?rVLS$8sl_x2SDQR#qOdk#cYkI zc!Z*mpbp?@gqq>U-07J09}77@yrNRcT3)Mo3$j*_J`t3g?js^n9-BUoTXW;wzBVNF zEY{!GP;xI8^8N^j7^lYGYAT_Y&VAuLP|JW$C*YAKoF7toAr(>5r`B+ELM`DaU;Y10 zLER31vI}$w3Eq0E^Ph6^xJW69U1~x}26RCfg(wad3$I6~AeL~b^0RPBd-4~P7 zeahe6H-RL4&!U+%;9u;Rp;Sungp7%u6+7k(zvy;-;0GB4zWhKfz6PZRK8@%&CP`*2 z3{VVxjb`!D;g+Ljzq}(!f!+cfWXTCC0kFx-O9z?+ZFoOLA#^vaIZ*W9#n8y8U-l!* zy;LaeAljNN-Gc}k82q(_m|?O8sk{Dw2-oxyW7(93lyt&i92eu&wqc1K^tv3~N{&I_ z-}QG)ldRO3Hn>VkO)FJhlCHT%fFwyo1M80ceEJrKGd&Vy zZkQ?&0v^YNXQwRuyOWiTs;jF<^j~`2dxKXCGoCeUjyb~8U3ND@p6ZJ%x`WM$U;8Fy@ooU zynlUuy#Ma7Mm*!xKY$WM&>~6lYW>o-;B&VotPA?5_T4uFj+>j-Le;8__Djmt`bZ^J z8EO05Fa4h1^ZSFp_j^4z4``P(N+z`B0b~Tx&FhS~xFjNc0HQq0Xl|Do3=dhTxpI5^ 
zzpDQ~pJ0@rkiJUpycD>i|KHo$*{k;d+r8dX{Kw-wZ8&u?8Cr}&98reQvI}qLZUN+6 zxTUuibezqgquessrMoN~q;bhEFO~?()CgLCk=@pv0BAX{zk6-?g|d)eN;pdsEy135 zeck=u%Tlu$uuB?6K*BIz*wN-LGDPqDu5MZ*?~=*1&Y90Fd;uj=7Ql-g^Gf40GoJ{pPOBL$@4&8Tkh~7e$kd%vKCZt^Q#hek`n5Cfw7CCOuif9Axs1Q?k)Z_rFN%X69FQVE;GnIX>W z<1?LCJ6+n5-txVp5x8E}0?GXa8zyC!xRlkhp;`9~?eeH@yld_Hv@SfN2Ah+8%t@T( zS(srh9%Qnpe>^kz3|_6z=2AOz(pV@}4ol2N zr8TL`S2N5fQ2bXCNAq8yM8WE7MY&5_qm7dbm(vo-&1J}U8|`seERvP7DBJhU zXnz5LNu+(#)?ZuDI&a<1dQ~v2+uLk`l2lgEZ-G^Hwf}gLb*N_Z-STk94PURXbCuqt zEQr!@&4Jn0q6(Okvxaxol@=wntg5O`Wtvw zjsHS|F=rC*vm0PV{O4}3w^JYg?d)tnjsG6wDaXZ=B$3@|-@BkJ9KdVC`7=~rHZ3y9 z!^`h7spVC|4evDb;eqo!O!?KuJM%hx_yUxL$Q0~qFxsKj$Yf)lEN)N*6Bn-FrY7&^ ztALAH$vu*}DX4jKS&M=+S?V#6^FBwdR29p@E}9=14i4E8PxW(Oy8f*|eBNs+hqmO| z<|1Bg$$77;T>1;J<_dE(0W$8I_c@PYU4AzC8cj}NE%L5t)m%v4Jp1ZptJJLdkY$q2 zg;C_SHiS=&^&ww z8V5MV=Uh(v+l>O0wbw*1XT=xAkXDP;wFXMWd7dS+3eH_tTf#x@oCQSi7bgkUFTPH|LE*IiL%16%#GDQLqo+-i{)qXsPJsAN`1 z>&jp$s8&q{_BsTn{ zp1OW>1d@0Os#=<=1NVZxK+wI!jV1=6uX+40isf-|OjPUIIIXp9sgu$!Z0qGPD zxrLC=BP=9Sku^`8&tH(B$eyJwK)wRBAoilRl-W^l(&Exf=zqK9vaz2Fhr2DSRzvvX z`E(mmT}_ix;N~r9vAEZ(ak2%aLAg6YpnH*FhKKM9p4PQAIb#kf!kf-kJTj_rtK+C%Bs@ zu?|N0{*fw5s|wzYd-E>B9lVNlu(HLfC>0xB?vB{kUV?afzT&fL{V(5e{D-ao^?ENG z*8g5S?f-a`r``c)?6#3Ql_^KNEO?mZ_%kjGdiPrvOotMc+X|cSuP850S*zYitjvy6 zyJF}sT%M}Tw=SA#tsE@lk}A`u4`shs>AyQ#=3YlZt=j*!v-ff?{(EodN&g??nOj-R zPaVoQ@6Hpd8-(I)K0n|BxirVYZmCM0;FxCltSVtx+M{<>d6{;t7RYC^Vg|RGNVGc&=xUhoh;B`u{{G9k+i*@CfhZ2dwn}`?d2QxBGkBPyYX-JT*-fLlR^G)AM?4z7}U+ z=^VDVcN<1ed4v@w!4x2=ns7mXH?yPu#ox>ZpJ~6rC(y!nN59}fI(2g>)xhfjNvJs} zO%CAG23{#-8Xvjw*(5gehgrmmIUS6m%`` zZMW3_A!VU<`EF$fR_K4fzg^$|xzpQwivN9-=f3*Cwzkh1iXW^f{Ta;wkI=6~l`LljIu>bb0d9@o911`{o zIhb?1r3O+7d+5MMzDm21rDQA7c|sz%?gcT-Q|y9g{xjs4?2?FZ|KzABq~%d6WG)Ec zAR;u*zh=w!I${1)T$^@ua!?Sw8m{Ah7jyU>h$$ODHcuauD|>44UgaQV^R*2pC}L{P z2FB7EmRbT*X1QoMn6u5CFMf$pWY0>{ zyA9VIN<{>^bEo%{WM9NwfFS0CY&9Va&JGQ%#dW1g5@C!?k;n~?%30r~Qkm?Q)@H3F zpM7k@k6a9CXwIT{2VWap%_+ftz8q2^W{sTLiPn!=X%-`90%B-{c27$t9^btGv#S5k 
z;@SVe^`HLk&Woq>f4_UJB8}&CVmNO*ucR;~Z;_3QJ0`sK;~ zKgQFB6QUFfI}v4B4wn;TFidG=M$HKcF31>Ve*2%ar))})6O1AlM|@}ljwl;HH$fbV zPSI?bD5Vn?dTn6nHg;|%0!Q=;Lp$&BzqWiZTd_DZC3TY^L31)E`kw##?EFk|fnFO9 zcpP&E9}mtTq(XZBn5wS%Yn$)+!{0^M{LK<3>BEm}|?75}y z&+;pd(A-66jxPQF_~`$^1qz0dd=8g_DuoOvgE!pv_eqqF$7C3xD_z_;Bz?~_ZiYSj zbjZ1qN{~bXDPP0>snXcbo9=M_HLfP#Dqr)&84&nXoR6FXs+>o?RotSNU)%ZVCyZj7 zT@Jck?J@guvX!|@5EDkmD137#(;HN9iV24!HsYT5?|=V0;7sxeJzdURVr)((FAW(3 z9>=NK+?`d?^wD<+Q*K(&ap!mzQ0)voL9T7@(ckf&fvJ_coS#k)-yZIt9hzGVJ~v|BU3hnM z26ej*)!wXdKj0-gRW2y}yxFe&-m)jeYYLR8>RPgrurA*kwEdaUwW*u3X8M*W*0woA zj8GwTI!P+Vb&6RU7!|a4{Yua~!SvV3T* zUm;rF?3HB8bFM=;bKRv@*2RPaE($4f_hZ(@=hMMY`==j1L#N~Fo>L`PaCPf#zBSr@ z;3SQ*vFDkJvMaMQ<704wA8@3HYfk?t8!u=BWlHd_FJ^;O>@|&tYn=Ovy!IL&7I}BdVE0{=DgTOFy@s zYgUMqDc zVQyr3R8em|NM&qo0PKC+cH6e{cdz{vT>D$PcAix5k zY-g|i?*GMuEZGrpAVEO3iHml{lJFYs;FoSZ!V|H-evp8WLc&B>cLzy9{iZ~uPt)5)t}fBEINpOOP{NAay`Ku8fFw-H&iC|-MS6sC@(qL} zoV`ufzZ{`FzyIgS@vGxEM=3}wNUo|sUDsVCpPMd{vqCDKU4!I>z;F(8!bmk=M;gra z1B8>4quA@**7ojy1zAp!0{Jr*sq8HXwEO?f$#1{@^7_&K|M#ocC*%GP(RMcn$_d;m zVCBE$@#;Ua++gchGf?Q&(TcG2ZR=(@%0QtMDO3uPdS75#NCiS3H!QZ@v_~=rTz(Oy zY4vMFuLNNV#2KeWR!gEu^5^vPrz@nEZSxCCLWj`u^YQE{C^?RD!TY z%a1W7z|<750QCuFu+z_9#SAcKbF$di*+TFlzu)IO!>E5=QJW;q{M3W4{*u!|AiAB1 zqa=}-mtB9_NW!%ITQL*Od^TOx1iwxG`@f?kStCl)NQ@n%>)qhNq zT$DRlkxd^}Kg~36+P?3U4RfXhr&km)xL|3{2~!WhktD336wN5SO-cd(=)<;-%eLh5 zht7;0?pcLLh+ORLwf<4bON(X|9)HGX{~xe=@DZ%!S+)A!28}ZGowG9mDKJG4t+~$s zG^9l46y25ljGRr#z<%y=-Pl{O%C;@6dZZ zB3Hq{0RJ+T%NBm^fY!RYWn1KhCz!UqS2vQO1zhgky1s`*m{f>iD7Q=qJc?#OTUqs? 
zQDI_i2gySg5l+t#uV>8v*ul1Hlkq}2e8C;x*7{CFy^{ld-Qll^NY;sdPZpuCjoC3Xp1=b9LFsCk@|w6ucW#{tH^X02w8B=C{>k zs+-0Di9W{t230e}V#Yu{!-1(6!J&l&ao)rwJYZ?*HmVh0P^EM$4f6d+lZWC~`CVnzuL(*1NdEc37(t|KU5F^Fp{t00EFU|8Jcsvkt# zfSqRo-WX`z;FhYLD3z~~Ibe!;_ z+abC=|0fMMh0UqcU-xTx!4`yp@AHZ@3XRQG$yFfG!(TJ><1 zA1c%mm?9Xp*-9qqC(-^XHH_Pe@?HA1PK{MhCT~+}bbup>hS(dJEjeHH0$WsAB+GIA(Q+_vS{Rh3qQ&!eBwsTPZRh*`5Tsn5E`X`hF6m(m%4KPu3{GkZQl+yD z4Wu~-^@%b3SPBVZNCfQ`!;}~4mEdcV?x(`I$!_GCsSoRs*yywcUa4gwnGr>$Wyuq`cum%d37prbqDY7RP|lx*V**K-;6o z!vI(2PGGnV6Td@v#piP!x}Cz`*Xwy9S{>Mjy_=GM_Vs>on{x&~hC}`I{wGnYeUJC+ z`x=xm7=j%-zeWl^kc?PNh|^-;_xrBEEB(IcL-qf=n)dG+ZmUq|M^u_tzYCsC6=Eq8 z>4$e7H<)SqC`ai#L=nRvrYlX#s_(5$d3(5Y7{m4rr0y;@_wT%PE>5K+3%2e4@;uy( ztuy_N0PlFgxxYlbjv?QSz8z9WbIOq-Y%yV=Zn#)QTTwcjWT2Kj9(-;v z&ju$Ts*ROZKHZ~}V~;1_4d+f$ph$CTy>0^yu^}Ntw-aFw1}D}5v)#^>ImIq{u4k)- zgbBwAt%)|ykodNvGmGGct)Oi#;o7dFJ%dO<0xu_AL=rJpGg*Zoy-Z`-Q7rqw$I0_r z=1T~D_M8%_%>=TYeTgM$3M5ew$`I1B9>S*ix7Y-~~&SAo&EF&J`0UO{mLq-L!#{ zzp{w#`Z}8wMJ&^-^zBMFFC~X-d^9eo4_#2-ho4yVn8Y0Mng~@OdS=vGK9lZhW~htW zJ_of6b5u|@iLCwRoJNZsZ1NRy&Z*TY#&gqH{SAJ@&4?;#e3*)rX~!$5ru|6?10+rh z#U(}*#1#^#{r085dxl(qyaI6zIVIR4-+&d_Ok*%)=(c}GQ${>wLo%Lv9a%154NOf5 zTTr;-gsI;Vp@m+6+1Sm--B6BX3KXy=l>C%$64oQEF7gk~?<7Cf-z~I8OKjy`Ik*qy z;Le%65`+t)?#?KZGHfP^4HqRHs&YB02ulUw*M2JU6e(?u5NBNbxEiH|ExuM1cvZEw z6eP7!{>o6DA8J1LcP1rqu{tc*EU~!J--+0OG0A6q`Mcu306AZ!&J_|cN!z1zPpwQv zF{_hMJTb;otqtji4Hc&#kanLLCdf{Z!h8Vq%ECT zgZ-gY-T+#A%6<&Zl*ubjNqc!jKQM5s1UeP%Rk5G8Q&0`>f~|+O#iBgt5Z*XwKz|oJ z^9F1mHKn`6!24`XRDX9^M4eh%21p2U51Q?e$~d?6qd zIh!Rr40;@Rt$rt9R85Z%4v5pAs%ou-DyZQMp1A3b90^6tLLhNUVYc0FiKAYrOC0Dcp44rq3A5B|Rbj?+!*!LR zA_)jD30r*B6kG2^LE^?(DUmBXS72BgzFuF(Bq9C|uU@f_h;J_nI?-TN$HZaRnqFfV zXx9!SFl)ap_BNDDB#cGdh9$z112E*vxxb@u`{i%$M_YKn#{82Gbk@zuvT-=gV}JG%+r!0@#E@`;SloPl-ccCFQ@AQ-l*r|DG%5Ce*)r=PdNgTc*PJRP z$gEH{v+aJ%uhSqxIPty?b(19JH|z$9^z_OQq8;t70qrjP)UdjBvMXGPN;)Kq^&l_l zVjm{%>0{wZbh6Nr(dxCESNGhz9@KT&I~}s|dQg~j-9AqFc`*od%h(G~q2u;G1b@wv 
z2FP-CcW$c3HOyfQq@*xh0&oOR90T>_yZPC)VTrI^rhEw*2$S>sgw9NB_xz}%N3&^R<#Bk1reop@*%BHs=M-ax0wNmq6zq2s1wW5Q8=+9t=g6Bge@c@L-Wz zj6uqeJRMYDKOwi#x4yx?B%Y1dcjcXfD$O&R;|$wkI?iy1A4!Y18XmTwx*BmAozS{DT;|wqy|^>!!=Z_FE$;m-F6C9_$_~raku&Pnd&oTp?QPOsjL3(- z><~@wEec(!tt7_>$CJBcR5t^w8^r}$fYrl94_i2IeBD$+vrh;PPu*e8ofAX&zE&|e zgdd*i{cl=WgxdzZ$1zMcqC#Q;>hHfljio`7n;T-2F7dpl!Bz-RxUCE6jTb(nP+)kC zY};Pe)xfb%M@KS}!7kNWOO)#Ilo;X!DJ0{UF>A>9`t=DEG+9)6(~?L9Ohqb08D9nu zRNORAW3%EAGSrr{h`NTH#}ETVR1w;t?*3cOe9k3_PsN(p zD(;=|6IT)R{K^?xuKkirzvb=imp=BatEh_4a(#%5Qw|6(QGQyeYo2$cYYY{1 zBqJFtRIrIhUsW7>@ER`0Lo+OjtsC3m8EU7BsBvUVHp}lLE#J1aBVrg_RLu9kuT{|*Kymt(Cb!YWYb)#K5kpA3GSE=+ViTFTP~>UotUuq zz}#N2^+KWB)nn%+wytlyZKG_hnrltsZR@%J``mfxQ?Q_J@iXc>)id6<*aPUCl|Sl7 zyVhfMy4FnIwy1l%b4BHCi@iHK)mz@S_~dt~#=LE@>F+F@4&SzPrm#MP8Lc9xHhP@q z_Hs1p;Y)?l~Nn{ND{csNnJ$Jx$1Op zQe7pNtIP^HlaB1`^!2#6tNtnY?9TKqz#!)q3%deAPwmV~z=B8>H>p|yLoDKp8?m35 z-kcmB{JEi+#W*pHe_Tv5w1`T;3hv5p#Uvr)j3{pNh{1DDj?6ian1MGYXLckb88A6B zuKutjRANNo^7D(7;0vVS21T`Ua={hg(z!Y386r`0_(u`aiHu}G5h5kMU?nXU6+WF7 zrLK20V7`^z!+JFtSCMt6ypsx|zA)%gJt?-rusADt);WT)Ux;=%cjy#eatn*!{B7Q1k0#QZ1E*4`b@UMQmOoki&pdAY>AaU$bMZgP&ZtxbcVje zU^Pp?7KGhivV{OC&+Z@Hx)Ov7qV9~vfIShxQ3UmZ4dPNVG_k@^N?oi)EN9tJ9P!~IbeUzy`>N|dbIu_?H(8KkGow+t7H~u0?bZw z^*BM;VTaOQMV=xBQzejsMJr2LLD_7VQe6NUWB~=CHiw$lkQ}87M)poK{6RMjNHe z2uB;GH;RT@0XZcYwa$wJicX7-rqE~#ji%6O3Ns z5GJ4MUtjsqcs4AEK%4&Ub5g4d?f4IFc&O6i+@A6AybyoR{|x%@)(0b>92 z`&EKCYmZ(I2l|&@^A7`R3c&kC#U*S&xciXq2fp|KYtSBi`^8QL&p+@*oxBwe7Am}p zrqueB68-U@Ef^uXrxj^42(&irfhLj3XdMOprK3(D&wS>D8 zd`?1&Q$a1L9o(Qs)T)@lX->j!9r~IV0q3@YyQowT zqK$L)%-J|Yx1V?la=OOv1j{AiN@M@rko3Lm{y@j?me%V5=$l9&o-z!QRjEvrW~zpunGZxH|#`O%ux6fQNL49`2&*aPA!8~fv{iDqf7nuXhPQtnAO|*qHrVnC#HXM- zOID(+zI7Z{chuJ{F9WWyxt5G)JX5JJzi@?wQ%p|T-OUmNoQZ%lTSK2Q63LP;B>cgp z`UHwe^92v@*t@DR0{oRAYeHcGX#~Bz&a!RNd{9_(sRYlEp{?t+n12Z>vW!^-7*T?G z=+kvfd6AmXtPvA$bFqzu1)B2xF2)#HG?DXGpoM(gIHlUB!;NAALC@DVJvuW+@sdeU%Zf zaM~;=-$x7^E~@ z!RSOq;$lmdZC2owZIR)jX~Bh5JUc`2DS_r*XnacMoElE_!_XmMVfj4#uyoW_kq=<& 
zSouR0#b^#+NCr}&%=iMHz0fWFP{ReM#6dP%4X3tf!iB?dSTq61q;7_|sf_4Xp^F|r zhpS?6s{}d~3+*Ue!{i4rYQu)9hW(Bh4zi!g@?Nm@^B(1bXf13zQEPk*(eu^YkoSB| zkBNA`aAV^ic)x^Dg)clJnsj~PgXwMvyLg!rcr)@BAcQwVedco5xa>`}fj)s8lx9Rq z!dV?CD1<-5&^Atha-ylP+X*^xI$$IEn`Ofx*BPV6FpA}~VQ!DzVB7WHHfJZ({UI|K zZs z944`Ym^VGLXN8XF4Z4$81*Mmz)1(!acE&U+QldWau!l2FI%TjkhdCFSz6L}m5)xOw z%h2tqxd?IMbga|#Tc7O=(zU6EMYoP^rNOW(k{j#yb*97B(BTpr~B1N1Pd_Dt9S za&`@p7XriA>ks3^wqm+Yth-FC+c>A~O*EVuVoJNo(Yh@Jw=-xSw_fX*^jXVfXL_sR zNL|S9G|_eIJ@TGo_cr&gJYBbroL#qZ+Ph7=V?bitf(yPvf_PsFl~I|rVs@F+aXMQu zU8i#mVm3}^E2i6Yc92W~99IXbtiNiR0&SaMXP4fy2Bxl&0)Mq#pH@)O?O1BqFqZ8R z%CJ<>k`t_Ck%Q23cOhnlkXFIoy19id>xL0tf^A&1JIp^Wrb-t8b&yAbu4i1`nA%2Q zAGQR3nORL^Ug@?y{ZR%^3C(#r1zZTC?#xd&+3nW)R)*YO-HUJsuDLxF`DWK>5It&6 z0jWW4qRr#p4)%nzW7##>i$LO;_O-fwt(ObiCoa8$yU+o~KIGQTJ`ZT`ulol$lm0=@ zq&ui8`G<|GBCh_cG5+HD>}(jBN>1&*Ef z+2S37PTS^*mE6Kh+vZYK;Bkw4p5IlZ#--ZwhWx%3lkm>_xKveA7GPn*2NY2~2^O|*Hwm?%ip7>bdHuc@kXdxAd%e;DvFkJAwf&au) zxPJ*T9Qt^V{{|emBqVb+k%YoRKu(^Lyjb^fPAO@o2gmnw@ryrQApL z`T{oYs9x(998e-+B`mctEUDZi0dA*WCq4l446 zKq*X>KnfOn8PrIFDRzE4dv&M+dDH_^Se2Duu7puXg2~Xj? 
zoY6~`x8EbS!m>GQN`Zoc52aW+M@st&zFh*Nr;d1+4hqZP;6u zWk})W#cf`iv4ppqmxdz-y9?ijXOf3Fr6|clEt5AS?i$g;bkJsXBiHE$swHt92CP4) zvN*SMP!{c26}NssGiZOw-|f89avm(^)nR;Hq*}Y!S-~%!1wY zFQl#ViG_$U2&xk4Yn~znL!x9LWos@>|Lz4K*;e_rfA2r?xX<3Jj|b^nv&Ai#{703` zXO$pJ-`96f_Gp22p(6mLKgg=KUB8F$M}Jy3S}0k++WxCrqDfsRsQ#c4z3I*{rQ)LG z#QHBslgZ?0r*?3HassysSk>S0>Obh zZsgXqvHz)TUUvOy^N?d& z{;iljDn8TfnB>3zJF4tnlJ2kz&w-s@U4DB##Y@QU-R*q>aWhGq)+RI`O^Y|P$Ho=X znCxa3NbCOivqa4v+>!; ze8DLw$a0Dl=mfuuP`>6AzBGM4kjxLjw&J;|<1naf{^1GaJsZ1S0HU0yGs05B7XIvWu_LIXis-~a4X_x|?R+wRw2rGET%+TYX}$cFi8 ZY-1bS*q%cBzW@LL|NrB$%+mlE0sv&e`gQ;S literal 0 HcmV?d00001 diff --git a/assets/rancher-operator/rancher-operator-0.1.400.tgz b/assets/rancher-operator/rancher-operator-0.1.400.tgz new file mode 100755 index 0000000000000000000000000000000000000000..f9402c23625cb7abc2326e3944fcdeeff6067707 GIT binary patch literal 1088 zcmV-G1i$+qiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PI-bZ`(Ey&a?iCgMcBx04cJg*n@yQbZJ)%*s>r6`dTp5S>{TS z1bOnB8T#J`{jg$7wv#16f@1n!EK$5W(&YEuk(MdGL>p*>mXWq+mrG_TykogMc_~Cu z6wTte{*9uj{~OKc(a9u^;`m}Voz3HuXcEV>_ynRC8^IA2j+i}(o*k<{x$gu_DNT%o zRxa@XI3p%?7IH=;F%q=x>*R+w|w05nt{#=4eSQ1g4$k-fsBKpcOeP zVln}rY=QpXj!TiIsE&3~EJtS;$325}caAXMYo=WSQmEqF-mk{$l}OPA*~%usW#I^| z3)=u&9aodR_7){Fhe0*C;kZ0z#AmclXqCWf>M>)w*C?EXv6rIbRv4|9mp?|5YGzmSM$wi$*&^ z+TL~B=WMWz0ElG?teRa3P+MOYQeJB*_+0|OEv__OTXd+%^D?P#nQV21WE}^fVc~mR zutL%eX2MYw)c3u&{xklE81SDkHR^{&l1*S8|tIrN4a?-X*_4Uc%cXo@?H4RV&*<%i{*>|nsR>TT&$H!qW z0GokS;93zA3TBQ>ZP0&u~>fIk7K3%`N{Lp;?s0-m=tMq61h{zz}V!$%y$5&gHfjK9b^@FD#_kLEr7pPf%HM*V*c zZ0mo$;H;pfw&I`qPAL5PlWVUvnxt^qTz%YViI2$&wvgx&F92r5N2?1{3vpG$)hAA; z3(K)>bI2`{tDm`63z6l_G_ReGR z&H6ke6FhYOpY-*A8pY?)sQ<5lovY`Kdi0q22<3R|X^a>#V#J95h<^hB0RR6!B0k6f GAOHYD${|bu literal 0 HcmV?d00001 diff --git a/assets/rancher-prom2teams/rancher-prom2teams-0.2.000.tgz b/assets/rancher-prom2teams/rancher-prom2teams-0.2.000.tgz new file mode 100755 index 
0000000000000000000000000000000000000000..7ed4d25e9b471e9ed57848d5b695753e0f0f5a1e GIT binary patch literal 4289 zcmV;y5I*l8iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH;bZ`(MN`?LOvIrS^j_G-k$y2YPFXCw)S`TerWHt_V)G3{h%2q!@J}+bgwTV;V)syWne? zqF)OlR2LZK>F3&3i42Lt1}8BVq)OY;BMJQlrJ{m&Vbb2%eerybdjEUA5!H_aE07n=yA`vD;QZituKnoZkBt#iO zqg)G^id2HeIAKJi3OsMb#DK6&=V*rJ7l4{7jd8}(1&*lFa@qyoXcQxODj^}BPf$u4 zp{=*_SgKj@?t8^#W8RZHHAW0eP3r53f7fyy}LiZ&qn0l%cH?K9;{6CS!og%gxiPu+tiI#upx8plMs>LKMLl!>HV+h>{bfe<&> zsbv0350<&JHAHMy}OH=-Ch!5uO(GD7JpSCDy-gLlE$Z;L>t{80U@ zP#P50QkvfSRa(YL7&D^_ zQ7Ke#OZAv?Abh$YWERb$5m8RlqAwF2Rw)*gh*Bb?#ON=BdSwK|Ra6p5bk5oILJ0ky zGIUC+c4pUr8l`Fo{Y3sk780c)Uo%VFEkS+XY(+dSBD_SF$>ePAd0H@(nd=6he)l{w z98ym8)HZLa|7+}jp{1K==U2yCW10oz!j^I1HbR9V-{aR*T zx(|){+yc@e1i!dLr1SFgV}7l7Uv#t1+z|Zds_B0f0_4%kZ_fY2U!tN%s0XJw)Kjj#TISJXLS~u%!DsVW&PV z@t-MS{_@n+vY|3Bqq3k1;|a>1h!bW5+Oz?r~ z?5D*uKuwl3{vse`*-i)aHtft&a8cuPlIcPMEr0PKzk zTo}|EpnNly&yw)PIA%PAgI)WR#+T`ENI!$$@Yk@|lSniIDQKAU=@iV$*a#btB(cl~ z-mmDeTX=0~n1LAk9FkNaC=nz4tC%P^l>Y#r53Vg~3Obz(i=l(CH4O|zSy!E4pS@&UboSqre$tmiw) zJp99sp!;t7g)Q+}Ooqc!bxR6fE4BA#aG~}Z&y~M~&C$KjR@*Ekhs-E5vlGgT?p!n% zv=MF^BIH&b#wpXZFfomhYXy`;DB=O-7`fe|?vyi&BxicDB&+`7lKx+b{Xo5h zi8vM<7#Sej%!~tZoUHnyF2*~X0p%e}qdI*N7!J&FJy7GO9lhf&@e4>y zm>H}8pFa54{>EjlWixk~yDLt&fzMXBzA+a>$J1dEOs^+YgtiT;l`dc4&s6BjlGI{} z2`3|r8iQ$-r%M;VT3*t>+co-sQ8xPjE(8^mvEd>an+!Oz2?6`Rh5h8JU^3e{cKwt3*yk$PG58xqRE zKjDr8CPrZ4{2D_>hhQAM8tg?d+S30QPB#K6@O$}he;p@o=cmpa??OGEknkN1V=^&5 z-GT{YA0jAd!0i}$Ee4>o5 zsc$5VnlT6jtOy-Cg4xnzMY{V9i5WIhZpN?U}nn$cv%m#@ce?8FP%lG z+~D3&9(AFh`;evztJnOR-yu^%yb`HQ=_}o7T;0tidBjg(PT+gK| za{xbg1I9w!_-40YBU_Kx_90D1l0>))+_*(L8f(7`{;pqb&M%A_iM>zqyKsY;c=-e^ zZU?&XrD3~%`swPb55}0H{N2|4WN&zksa=(au~6EL2cR$HTDU~W0`Ai(4W&?GsKJH| z9Tl^QY$$!BXl5vSi+d$QFRw274f&%ae1!0i8=StjYQyNK&2n9Iw!L6Jw=`_udNQBf z6~UpsC31s#Z_ZHb=3@DZD@XHDj9l-J?qaCEccBFHZdaM1b$$Ex`l!BZ`HIPP&+n34 z4|`n@d_WEU(EKc}nv7Gq@Xi(!Q219H?f=5re~<%khyCB*f77Yj|4ys5xA$cKAET6Y 
zGD#BEEEMxmvG^;Rd3!DUHG2GOHM2@E%x>`+4T-70k;%hcPKU=thvScqvaJC4 z`eWg`{+rKhi<{((rP8dM!YbT7oWHN(5Fnpr``aDd4#Ut)>)E|_MvdLDc^Lhau}cgk zYPDc>xy6#v;x`hr%076zM$rRI1WRKq&0Zth$+QCqP!5o+BJW?EpLVW}4^JL_S=c|J=Vm{CIqEb$R{cZ`+ev_Sk9(E^@QVYNaq7Sz}4f zv^!-4OEK-GY@8rZ#Ho<1N!eMHMA8XmIKrq4L&6kRTGyqCtU?mT#BD!EcC13evh(aJ zY*@O+=`M*;Bt^1Z*MP&1A1hN(QMXIkDkW7`d(l*@!&hdujI;{0v}P5+wV0RA8x>QO zr_z+H`69XIo~Eagr?X}%Dp!Bo%|`hya?@m#;kKQN`)ud6TiNDioe6F>svGza@K1O_ zxn7IC$RB$NEtt>kKAp|ng~@!bWon;zMVd-EXC)=D7V}EiEZ^&{zV578>O9<{=djwX zG}Zqm;q8s*zeGJS^6SbS=f8Hlz5M>i!QOu7>HPN?rF{N#k*4OPJwQ$JV_;uIdAg|c z0KfJ$*hp0J4NiA;#_?`LHx^2tp}rOJrdoN@?^P@MCr68lg=+Dfx?tFaozqn5i;{BN zv8Tz8Mw?gQEX*OF+V(U_7@E^GVOAxmdXzh^mhjQS4mTjCybJ9@^Tgz{%TL`eC9HlU zba%NIm4j3Rc8cTD&cBI9dn)%S8}+{w48MT^xGVqnpxs&4{|8U`e~(kj`daQg-yn|d zuhju}$^Y)M{qMBi?6sfd|1nB!PR7!1{XVY${_dIlKFc=p|Ij1AUGo2?v%mWO&r|;2 zqm)hZ|Mv*Kw}s%>1%}+K@0D1r+r6g2Q7BzkHnP{+rJ*1tyngE6w0j<}HcZT8q*^1t6XSo!`(=io{HAEP{jKG7Pbd!fy(y2~>i zq?8#y9}*JYkP$Av1)|c#N)o6sG6o|i1}=Yw^3iLM$cUyB1e>h3s3$z~o&m>^OI&@C zNF377`D^(9@iKsO&ZhR842fe2614B*Vc-Qvm)DnCNc5gT_L9dhy-SFwR9-Nmx@rG+ z?0doBFWI#J=N02o)BKk|stIo{5DiFplP31vN9DZ=)NSIu3I^oHdll$7@m~Fp_Y8g^ zl8RKp$;4=6B?a)X0A|gYX;BxI_MTcU#N(|NDD; zPv8G~lrnqX0EOm6s>c}PbtssS3f{rYTAm$=O0Wq=JJ5xl&Qb48=V<4(YYHXRG$hP6 zz3sIR+lP6R{elbDG~5!&DIeLE_wWCC*!nSVN%;*Ix7@b$Y}xSu=3e9Z-18hW{(l=& z?svgIMWx6HdqhV5>#QyNkcMeCCb-*ds&I@k34|Oq@v}y*?DcvEZ-?76qcuHd&+SXD z_8SN9-jzoEd~WaiOLG(CjWD0X?76#}UIqIUtI@T+1I|BZ^LyTbM|23!=Q$_&XKgmK z@!}$Tcv;3Vo56yP!vpgIv!}92=LNKVKl1r}R!L8B-x=zHZ!^0~Xl$cR z7+R*v^AAnrVDs1IX|58@y|49zPq@8I+=_I8ZCmpBhFVYlGXH|oHuMX0+tM$6ylFiP zN2M=3ZNo(w>3T-i4A*@lYOBGzOou&+X5ijM z*owO{(oZ*NRYsLj3qZ|L#2nexAxxc`8rktIGcd00960)*R9E09pV5_VbNv literal 0 HcmV?d00001 diff --git a/assets/rancher-prometheus-adapter/rancher-prometheus-adapter-2.12.101.tgz b/assets/rancher-prometheus-adapter/rancher-prometheus-adapter-2.12.101.tgz new file mode 100755 index 0000000000000000000000000000000000000000..3042cd86fffff8c427d5756b95d8f41abec49470 GIT binary patch literal 8357 
zcmV;WAX?uaiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PH<$a~n63`8>a(m&%qcpJqtvnPb_mQ)DM`ZS3eQCwp5iFAdH# zNklWiEP#>BDBjOF!~s~?e!>}5Tu?Ug64Axcpqg9)PipWjhQMZ(j7au2|w#TC%`p6|X%g=ffoIYpe# zN?6k|3PxCjfBa8^lFM|80`-6jU{eeg7r$?hy`?ypJQE?>;mu>{G#jB{s8Dtdnq_;j zRS;VBkUg%E$sdRVFyrTQfO^}G>hY79@B6(fEVLeTpbIP$du>B zeDX7%VLX6c&vUBvHO5q`7fhizBk=(GiRbAr$d0*C1K8i)-SIreBRs*_s5;yKR8TIc znjOVN%2!77=|OJ5uObG;frKH->J;IXf9hdxz;7DKKqV+l(wHcWJb+BZcKR4}yJdwS zvIq_2S*S;7diLMF|L*A@L``km0@Ij=L=K?ud16FD+s;Xfp`LRkoKH140k6M5JOY7E z9fO?mERJA=5DIktuzw9e&-9*`%5)K|-!%v8r_IBLosf80o*~c^Oh3NiSmZ$ka2O zwqfAvftXo-0|0#J2iw6;uhr=iT?)+2o1cz1=3=M~T+xhswjIA7v8+%UZ0X%*jymq-ypPJm9@;W6%8G2}u zv|qGDc9Pl9cFQ$qGo2E0eUn-8@bTl|#Ps*+(~HNCgP+L=;h@TU0(TP{~huw5WK>qb4=XfCuGoucBKnp725NpATRsDJ%)CdaAAqJNsB0NgHnfYL?;vj_i#1 z;@I<4Y~~GQWE2;ci8f>h@TYI=*bO+&MllWH=n%#vYFPff&dYdEJ3EEXMP{%KaJkipnULs z6>DSJbi_Z!nXd=&q>C!-s#xm(f>WH;O6zX56bpr_H-E|&pb*)QVA*k`pyq>N3k<+2v^c%+Pt`$={~ z;*T;G#c0M>Z}0~iW7}{cF%xWc(00l#Mh=&a9c=yM%77{BmaJU|&>UC2_S|49{3L5b z5=f$7SUk(kc*SEBxs<$pr^ji8OqMpO<~uEmDUWQHUF~Ru6cW~2`miB^o+idCR9cHQ zjeI(8vwcegUoXS8xk4FKpg_VSYq}e7|KJ&%n;P28q(X}*l}@H3-NuY(+RgzIo9SPn z(R?wP0kaUbF|UmwZFOq{$G|;2egW3nhRndy0}XkSWXz!0+Evba@NLNe4mE(B@>{km zROVf?kSksKJCZZZy8)cR^S_-0aDuwCl0(F#UBMPgDk|1b8}Roj))7+rkWW`(fNe3!!tr-gLW<;As>Ear1;`M7!-|A8>^II6wD(DYLIjX>yXR}S&Pj} z!pH>0pbbPwj!>v)Zt*sN-JS1xy{XgR<>&7X_IuO+2>;?G{=eJ?m)%#tym@{Ss1NFT zZ7kXU>FsYf{D0fM{_fiUcPHuMqF8G|XMx`@_j`080QmS3rbNOBkwJuM%x78}Uoapr z#&a!$nL=q)QYrnpN{y*hxf-oHR(KrqbNx%}T*9JynN(gwVCd$O7V?d%F2 z>(L)N&(KgfpMD4Yj~`9JiV)a6fqXl-buXZ(mI}($7&?EI-OjZDXbEZlt*s-oLX>cl zrFsqMc^4PmM_&Hbxkcfe#<8|zbB`BjL)`A{n(b?|-j!RPE;7QHpS#Z$6Iv@VZGOd^ zL=wWncnzl*WpQpGbBR@m-lMMWW);CE*Vnmz1-iwrZIQqqD;=?>uKs}IF@|aY zomc$CbwGdSsfNeJ06NeAm65np$JT;wNVn5bqp`%~48;*oQX&vKkNZJ?*Y9;8I3g(- z(U>ZVGWc6ILbs6~{79Ai)4(K(HTd_n_^3pMg$SFXg$f@#oBfqq%o@K9jGWL{~wIY;Y~xt|sO z)C(~d!v2o=lO`v|^$s1s(>^HAu6ltKZRiPj2AY;8jsi%Unt=q~-_b%h<&|zj4@ytZ zA<=6N$rFUX6{xktA5)C=;zywMz2|I8B|IF>3~HXeI?-kIlq^#WHv{j*7+jo%1>)AR 
z(uGIZeG_G6Y=nBj69JLhD#+s4^>$TEmj*|RzR5ES1m|TI7FdY_IUnjD-a8j}#A^Isk43_I7Cwx)7u-$h(`NcA1hSBsdrUMz6? zk-o7c0WK~I4tz*i7-!K+M*8L=@Me5Xerhz-aes7H0@A`X_ifRv(^b++_J4>%CDv7UW)4W0U&6w*iG=A~7DjeUTSZW+=9RGTX%hc>sp4YRAFt&4LUO*ucc>XuEaC1ovM)9h5_Ola24-YI3#z;(b& zP?3lzGAM!+uj<6;;bz*XitJ@;vN4LukM33^QGDMVK;OZvUjt_{?#e-5JcJjiLZD;%8V18ESC6;&yOg_jm4` zs|{%y>>LsoyLVeuw}|?IG7`tLtpXW&P*+{F3n3sWb>@Z4Ev);Ihg%edl?33Ev454= z01-2Bie~5Azus7`Q!UWLt}aSURKO>IXM!Uu*5`dsr(axJ&Hi(7g@VWEk0^^MoA@MC zQ$NBOCq(f}?ZRc}fBMb%@9q7ao&MVXyNlGY|HO!d)m3O!8Jj#!buQ>1Mst;W7`a-u zjPjbt_`PM5ns$zDbb(QDBu!V-Z#88E*1@n^P3Kl9syhFbRmwG&i7PNgK^2)aO1O9& zTWEE?Ts29IPrD&8o|Y!3?@3Bc5Z5J0_5kK^T=d5_FlaO&v(fJuDs%S6);*~$<*ziH zHit$~;=~=?YS+_$!oM=E=DY^?&HL+g6aAlW#rgtCbg5%tnf~A2X|DhG_xd|){eKr} zt^e;p|2IVCHZ}hZd$OJ)zVdW+{lB{2&(pH_uY-eT{KsCux4+i^cac7s{$DHpHpOpG zula@AcaE7($E%&%$iu>oZj|PyIVA}8fR*u=nIHD#S3*+#^9;qvRi2KhB8? zr!A^=HTln5x!z&$CGx*_u)WjV|Jm8yTg(4lBq#rjtGTE~d&qB24V1#3pE|r159}&N zf+_XI>yjK(R;r9h4dBl_GJuN)A&MB3+E*K0A!CfW{} z-W{pjM<1ws#Z?V2!J~Gpd@(d#)3Qp-FOr5UQ}3#TYd_rGLaRCfa?1UsNx2nCvH~3z z9e=+RZ3Sj-h^TwSOHJ3@GJ5{`Y4m{;|GPoirIYFnu=2@fzQx#Sh7Zo!Ev1E{%{2ZG zlEm#L{shV*WD0gK3rlQmqkeR!yv^cuZcnv0TvWTf=_RL^N+K?4uBcWtw|{E^n5(V- z45Iz~{A(GsvD$iY$@*`5XMeA;{@dN_t>gdhBwcsak@I)$AjskI3%5*^!B0d&hosb< z9cuWCI?z~EE-YCFE3aB(?mV4cu2ybVDA%higg+WtZCsOS&jaUyNv>tee#cPxA@Rq8 zCuaFqIq&pWSq4jW+7`E0i}0@a%k%D31Eub(FSR3*2AxknbgBG|3Wkaah^CJlB4{ zrvtX1dAiR0-x>}uP01o_VA=lv-cED>=b*p4-v7Ciq@|+!$a`b%E*V3TNo5Y~$2Pj( z+~X-9Q+t4laaCL%2d29Bs9QCc35!~{q8?t!KG|G{ zF5KD<)-)x)UYQCW$M+h!U@zS%cGToLbm3OgP4)jOE5POZ|IPTXz25r%=etRtmEv7% z+%`qGC}?1jabCHT_ig7<=UovQ;C|MW)JpPi@A*lPUGF)N%jLh{-`Vdq_y70ycGvQM z7pd-1No6UQ&kCY{y<&N-5QW?yP+rYmIRut}wBYhfS1dY>Q~u`)ey@+=rHCpv3q0rB zF9@ttTw)ggBmMlX` z_3F=C!Avd{Zd0paZQs~R&s(fG!Ya3A)}6WFu8qxaD$TEuZ>47gXmMX=S)y`;WC9;QcIqu2 zSKil{t5H6_QSB)alL{OA&@JEY$UphM^%?pp7wSm`ZuuBTlcTQbOsqB9xcK;th_1+3 z*PT-~S;Z~L&3g0KD=(~W?^<5KRu-b^MMTAuu1eCbJ3U`!$9$r=R1pgCq{5>%vmzgv zh&A?9TJ9WH-Vw<}oHIg&kt+pKi{~mU%;#QQ7}i}-Mys60*z(n)FpsVH)V;P+C3EX@ 
zEKN9PHO6qSxy&KbC_8(Jb#A{_;_I3$l!L3^rM*bewHLnPykKu9uSrB9|Mu0HrTkyK zzNM6j_q8deXUI^>W5GwY5FK6YM|)LoncU^rQzWsP{!z~|y{H4%NrqF@&HnV}&GAVk zhcc=tiJxIiW+yg)U;sP4%FIn7`dRj@xjA1vWmB^|<^n8QxuUFGazekigcasy+~WC{ zYN_%~m`cU!^t&SC86`{A?1(dkA8KePam>$;1wErNPVl)5N$f5z$Ztf>Q&`#PtrIR1 zJk@JU633l)m7M%0^4FZJA83s3kSf$P8Gy*xp?t-eF4ms;t3+Y;aC-eB>#TYm`=#Pt zy|}%Os8#{56<;Rf^E$nGX?w1W=7iX4rxtWIa8L)Sl5`GI_1o=Ck<}L4rnk8+SZeqS z$}AY!yy;wjaI3U)+wZ9insjMUzX~qTZcw~dqy<{7d&OI3mrW9Rp?02Wc|fc-p*C{M zinUw(7htA#k?1nyRPQux$4#}wGSoDHmuBIKxk;rWcZD_Sa_YzwgiUZ2Fm+S;QvEmg z!M$ZMXzzRVg$t-cxkOvED2n}B)mr<0y9Ov8acaii*8Kw-YoobOFli(rzf z*)uBKvnscvSo{@qg}r#Eihvtm#nWJc`}VheZfPa|ZyxV>%?QAy@qfF`^IvjU>#rgY2xerrlj3tz};L|Rt~r`-QrA<*iU)nlT2|LlGtqhROY+WY=rJ= zytTf*uXJ<$zkCI_O#kUJSXJkzCgP`X z!&xog@(A#4-46A|PZYWHXa$tl-b6O=Yi}FZHVs#*o@@M8H&QEz`YX(~4~_Sfm-V6r zM7MJF{=EJ_SnK~gNljHC)9Uq~zVEN3^G{rak$b>? zt?@6__zMG*+k%p7zWTAW9w6JlOe^8n89S7RZ05OOsig%>G*u=4WhRv=W4{qL$zvmY zJ?jjh-`n1GS~Vdb2C%=kx3g!o_s^7N9~QJFvowt{L8eGtE@xT_R>CoSx7~04%!3dr z_nf{Y=^RV0m!4N%dZsdMTWx~D^aCs74;lx_9;7_V;|NU&it<6_XqirLA7m2E$Gqmv z-X6Lcsn=fS2F35qldM}`pVt07(@OSVagFsSivQVe+JEc#pSwzz-cz>@|5=Cse2w8h zUr*G}I@sqcPuH9O+v0x~TLMete-4`azxxOM_4^NZlCHG>TUbT^4D^vd|GDz z_4nuAf9>t8*ME1C7D#w=|E#eyHh&vR&LWj!jn}chZY^=Ot7D6+;(T}aStZmq$8=h` z9WE7I@sP&@cyn}Yz9=FlsE%tFr`5x4uSl4#uZyqc{|?dy922EbFbRr-r8b;Tk-;dVv3bKOCE+QVpbWeXFz-akEHwrS z$SKA#OkzH=hj%EOY-!0Oik_jl(6#cNu*lm0h7)5Q!oyVHn10CLnEK0R0Kc$!2Ar9i zx=WCvfS582yx`f%@I-Ne-Ub};B;gExJ34`g3h4zCs=DU?w)P5W5MHT7sn_ zqZqq&2%D}UH6<$Vk}P%_fomn4hXs1xhGq~H_^*sgs!)1fg%tguzx!XN%#WzxHKrV1 zurc?%_wV0_oJk&|r~3v%XOSp^3QL4MNiuU)Za$*6%)kwkax+Tbjv8@zC7wZns>9W^hg#z`V#%8{7u`q#N7u zz6H}NbtT`ve*W|G!;|O3SBEd3ziSH5EJda(w*o$YWprhXW~?2j6dPGVdQlfJEM3=O3V@74#vI6Q>8bJ+dMYorttthz);?jvA#wJ^g zIN+k>pUhO4W1P1~KOMe)^A3F9wLK?3uHy3E+j1-P{iw=mo@bh}3ZMR=M`R`igW&IR ztc_47b(BpO^nx-acsG-jO@^jN8(|60S5vgg!XFuU`A{jFYzFX#gG4kuP>8@NM&PM+ zT^gE4s0h^>U5`t1#TJ}tEm$(c0|{@>?8Hy1Llj@Zu1I?yH?%19AKCW-dC^sc0-tZE8u-#apf!|c%A9xgm)N4RFj*;@ocN2 
zMM9pSV>1F074UI!mA45A0-l+Vy2^-ufKg`nZUzaN8B0T(MT&Ds2rDLR&2Nk=NJy#+ zCpiM+nUPOpurS2Pm>ejKU~Bv1KIyAz?=1^auqaM07kxflL*4sW#nj)#~1G?gf9$MeV@h=Djl$vZ$-g`Qlsv(q?fcO9AiwQ=eBd0RjdyH{m;d1LxXae%#cU(1lFA z)AzHdPdCf?=6SwyTA9XkYpn%TUT~LMh0aHoWLT1Wo?bbWR*L1BpBK&YUVTEU<^5Wn zYQ>^+3Dp)7>va==0d5{Of|N&abo{HCu3uup#jMdx?pwGsZvv5Uj=Fz#;qYF=3{?z@ zr|)xfa5V1H<<0ZKqepOp%1A7+t1e@-Hl;ULSCWH$V%s?s8cOZ(_(i_# z*>WDce8ntDf-uflXxA82&9rv8eV<_fhZ1BKPTgWbFEUatCDn^&RqcKPBd(@qagz7X zofZnd#Ql-~^mNJoPe{AYs`V>Qjcc^AZ2z}6fB)zH!8-oqPSWM{FD@xIitD;;u#EmY v`v=YVpY7fC{@Dc zVQyr3R8em|NM&qo0PKBxbK5rdX#eJ?I9F~rai*dq+i?<2yLWu;w9Raqcx<nsN52cQ_?n zhI2AYzS`8&@Av!rdwcr-e!pM;zrVlx{HwuUe{b*k{?7i3y|4O%z2`fFub{s%2$np# zkc5BL|Lnf%gZmG8NSZQ9BxR`>1^^L}Bw>>dWgi~as1|3AdD1xGk0c_P6~Jd7D{&}tZL!P%4wpaKZ|cJTAt z?wIizkrHDV(*#w4Ba9Nl5iSX*WR#!)$zX(#5g{-Jno0(9mUECe%Mv0{gh5btPiJI; z5V2Gen$mOvBPOSs>q=%*zYJT!6%jDO6gkl_a5mFl$1F+Mm8yQkq6_2-xL^yWQf6Y< z>rJSf=A$rTvtGv84CNGa(T&(_mZwzCdyaj3qlArmh$J52i~army}|ygSA(5buXg%T zG$w?Q-9CBwJlcOT#+QR$+1J+FLHF(8xf<9zCUFRK47pJ75Qq|%$7(c}G)DfdhBsxg zLe!K(ch6cNudmMU!M`JnFe4 zs5CKz002|P^zjgsoe%_e7KV*40N%i^rZk#XRjcD~HMJQNviwpEn+2e&58>7B{wv+$ zn=$06K&iNj3R63_lEsqJFDGxca4SWM9mPoy6D6Ed;=SSe?l zmQ3MBzp1w?*#)LfkHj#VtBIEkX5<2a7RrWR=&#UbpQLaVuTn$_kn4$6%)!Yz1 zhqJe*pnM07t_#$9%#O*CKtxi-USpQ0v2ItDKcd{yz3S%&nxOV9G!-cwEkJRs)HX>& z^Jy9hZ8J{4a*iTnT6Jis1WHNMi72{MofLgu;JorXB8PqvP2VRdSk5DagwCj*8*bl- zAza^@Lhm!9q_lEV3QVz>GZ~l_8E#TMHzW<9qV;r(;Oo61eE41Up^YxXHXJi1lr*0g zzDg+&O&_|PB$FXrYR9Q-%MHr!%FZBZJcJLOTIG)JFk;zUVMDxG-t&xdt~6ygp(9TC zygSOpe5Cy<3&J4W9fX4*(DG)5nvqk^^2yYyo0_Vz+QPSxobjQWE_BVy{>y#Cq}(c#t{J}&=M!seTQ@NbLd91U<%#mwi(plI=_EuU*+-H{L}FN3-7G=af(TX zT!b?FyrqB@{{PkFpV332V3w@n#Sx(z-gW-X{`p+dltC2 zg&nN~J&EJeH6S#V+Az@TmqxdK8>k{9RMxC{?R$)p!4=~d2_vz9$Qk$|nqr(M#ubxO zrfl-E#o$>_f$1Yyf9xztlsD+DuqsnRuh0x_UG)OTV~mvF_>P@g0Mu)i zsrHo4=)C?jCrPJ@tp#8Rt7$58EO2&-{E*Eu<*IbP8-#b z+BMbiM=I@~x}6b95?FsKuL8-#iCWQo@_=0P@~)8 z`}<4eT=`t4m?^lbR0Z~;(urA$4&S+2e(APB)FIdFHlb3&EPYExm{^*yYurPD6q{>4r 
z04g11V7z8|7ah2zoZ`3QPHRHK(?&f}R%~{&z1=3S8Wl|$Pd0THmv!^!X5Oo3R}sDu z;Wr`_m(lQ>xC2jV8YOw$#HA*z4usonMDtXMrIztPm3(UKaA>UXMKopbY`U$>DYzAW++KaBpC((W_x^XVGm&VR#$0J zt~HGk!VRP}jg=VL(@dKuD2727NJf9;EYHLcK0L`-LZkVU->WM*>V51MH+2z;LKnJ3 zJ`zJ^_Q8iIxxh+It;f~Sx_8lG8r>v~7c5CS#J7K4wyq1>rAS%oR;_+Hd8-E3>Dgt} zjyIwYoe9d$?}ac@b-UfqnCt&X)0n1{FFNHc!4n*-p53G`8ASllB-lH{Eb`GG7)eV8 zrorkAX{X`Nm@rjrm`q*o7~`>J%8H4pf$&+AvJN1hJ9YJe7(VS?4wPT%26fEhQ_G~s z`uo5CxI~80P;b=HOPb)s-*lQXA>X0AV*H|5%vSH-93NH-9ltp$U+_FV5WfiIb}2Yd za0pLM)Z2q}{v-ep_*Oa8i@)btmf#FiNfIj={h~kTLh9mA%v+L9(3VgnKBMUn`t~Ql zjC>lx{`2R%&jWz5Fif|h<~Wl0eVWWq7}MJ-X2qyP#U)GfnQN>IPXtCB<&)2qv2Ihy z@?jUA;4G8#Bg&t+7kXBRoT?XkI~3!e>?2~9k+O`t#CW+S=#ahefekk*O#_W9-qJVxr6S4He~;kEEA>qSL{V^QW3Yf=H3eLMhrzI9t*0+PZ{G9`4BpGMXb=; zZmHa8m>H4L^zD5+-Zve)(b*cBd?L!8y3p;8|BTb`zkd4D`_r?JZ;rPs z-}L%vH|NRsUq3xQdH?@^`}oVr+iDT(M0ZIXb5-j7$yv34HUzs$n0()~_}l>f06876 z>!LNwc(4&Vo3bSmibx%%(rIh3hL9Fs@Wb$YWGV1-f!)M?T!z^NZC6d}b;~!y$Leso z5?ygYv+kuXb8qlgt)KWE>-f8_GUUJST@v2odD=5BhUir{F)cUSz`dTXcyADEW_$rn zVm^J}LmBmiNVMH%2-o{F%=GD*YP}1buc+3&!1k&US) zY-ZVBjoxAn{RZ9o;~`5Wevw9o&JrOFYm=$qoBUJ!smzu&JD*@nDeJuL03{;X(R zG$+l;M^&*KQ|^H{Q&r4#v=GugIHg#-^~?I9c?*?%S+ler2K}W(x@knxl*+?}$~$7& zT;ot|RBu-UGFL0`TKQF)ybEIL%Z|wVapO?}MpFizQzSf^(sbg*>S|?}&Gib1Jso3X zFAdps8m%ZK4dLtS!`CNgAAflB_Vvei2S2~QZ5Gr~HeX-=6I?O zoxcC&-at?tYFQ=*X2{a0oBO!v3&x4+pL`GHUyYNY`x*26lw$R{dc(As1T zbBkNN!1-DYHj%UhO415vgWL{THdicAls4xuzOMJA8unhE9i065`pl=^C~K+Ir~x-O z%@Ib^8H-_WZ*OV&g`hk1S;%W>*MJxA+TpHOsh3{`_2S}-@l6^SZ_KX2`N2x8H!HT( zT8a5}YP8x(RSs-5KaWd>`#Ky7nnRS$Qyg)2K->jUn#rF=AHx~ng<1+@l3+}4^s_X1S|{BfshQ_nj4 zKZXtO8~wF1{%d!zw_D%;+1q*Xxc~DIPm2?44x?#r(Q~nl>UiMruSWRls;!V}gzUd( z@K<9{4;)~%aisp`;Z+TYHxXUcf`%^?T-5@}CSs6QOxs@_TJ>c{wRT{Y|MEVORYm-g zH8me7!fMbS5_R7Qs|C${PJ~t6E_;YutRA2HK5N^5pK&ADD*JEmMLqt%|9t29qy6^~ z&t~kuTKG?KkHH4D#RkHER;UhG5dC8gc-=qvXK}Y1i2YgIcs29~f)xuVOZF1mcM>-o zMPvzJWeibExrdG*G9avsA8Hm`0q7lqe_D;#wPJs&TdSY5g=k<{5%}X5x_j79R-Bdl zyr7@f3B3DealPIeTg9uKh5slCSs2%hJKxJvSLV@`xh`jMsFQaLyb|eT57M?2bHbT3 
z$s(2v;q1_aY)HZiuNF}3!v|C?7E*RlVsT(Wlr-rW#fVgK#y?$zVJclHL4 z@!t>f)Ffl|uyZpEymlCRfjL;-+Z;xWBhzu4y}IagN6FWafGZAS{{@n8!CCQzBGK*c zxnn+BB-z~!C%o*HjO&Mg-6f(jBb>|%wf|8>Q&gz@pbP&VG!)|>Y^2TBVR>DtmhADF zmZ{dH)r0X1bekr!kN(r9aa=5jwl0Y-n4FDA*l$h`|680?Sg46BB)38luMu;cP$4nZ zw@a=cXuP(b_5oN+QLjSFf3v3kYo3Pw52u*S=pVG>asB6Eo-H^g zQX)4uo0)lst0|^1%4rhQbOITPF31E$aSOaKm#hg8Q%n+=By40vG)*Vp0>^|%dWky1 zz9IM{*|C+1Gyrnyr=(lFQ${8`p5kgmuatTXhfomJTuV@!Lv|YWx=y> zL@t77p`59|8K;xr+5ZGv@GIezab+({fxthC!#3`Q$1n)qq(YJ;5%8ft zu%*E2k&2$=h`q_Qn<;_UbCR!DZ)#iHOXlM525jciXp(x@j8m?+6`u%5&Hkevq*+q{C!?;;~QVVwp8TSy*wAr=Tza{Uc_6 z$^ji1izJ%5lsM&}%@xbVy~$w^{9+Pcq(If2q!F4*svk{PT9r_#uMu}Q^Op%cM1-jl zw>n+7ULF4Rh4?n* zh(0Z%LBUw5N{pmduyF1I0?*e#t-qX zlFo;v*<)HcbZL!ku(m`P1Y3G%qcU$6+`#KkWR@ieH^B{b)dTkbZuOTga9)to zIozz7H08$fg=SIJhUY%IR{VT1B@fzdkxIq~lR$_{zxZ&T2-+4X# zy(Ccu(a!FRSLbkJ5oIoXC&^Moxr|5aA*y}F?OeB`)TjWm2{mCd9?ggnaJ?fIf^BMplU4c&Sf({ zPEr+@@!C8b3YX{296l`+Nfzi3LG>Mqm^3p*_1{{qBxBMimLN`2iZbj9({!!Q!s)KVKvsHtlvVyEiy-tl* zX`EUqlI&QU!ZM>tJ>$*{3CBxHC%I8*@w>GDWryw+P@k81r)FwcF;rR@-hrHPyO9&Z zS*^!4Q}V1%#^X|qc>Nw6zgdp17F%WwBrVC)v%EWdW<&LH;8~1Ts+3dj=imv=0+C+% zp~9SL!5D}l@$xtcNtI#d9dAoO{V&q=`|j+TX#j1I3hgOekO^OEBY*iAUH)Vm*mO-fekFmm5pudnM31Dc zVQyr3R8em|NM&qo0PH;PbKAIb`OLp!pVBu@(vYHL*}8I?52=%0Gw2xv> zxsq@O0UQ9z)^psyy#s)xC`z)F#LvlHgb%R-bkAl)nP@?B z_UQgv9LMqLVBr47aoqkJpA6zhy+J$}oSgPghl59PZx9cUA3=PNG;D2!Hbgy&zj&?o z=Kdg!a4rlnRB)XH04XtqiD{G)V;DwMbW>D@PAFyxl)SPLY(}#Tc>*1y3hZnI5jp`z zd2zjQR3Ss6G2|r2da05v10v}!s5BKkfq6e5Qr2JNs2?2%8R}F~XRyqlxeWQt>|vTjMiMzYL8Vgf`; zMpJi&!Mq}?7sXL8j)Sj#zq?vX{^x`hsP9<;?2!NdU@&OQ|MBtZLH-}2J%(2}Aq6ww zuinXvXND+v3_s7P2C9L;-(SA{DVzwE6Js!g31w&jUSZ0JLYNap$(W%ABVddmiMGN< zxe>4sg#v@QWW=D3f?&$Tn6Ogc7>4!}fL>^Wc}dQDoKkJnB7u(8CRY5kBq_d|qf#_O z*YBjV(c|gv1e1cX)=(!1Qo$#5`W~4e87jM`LMb#g!Y)$^LN}*M358ks8_jc?EwtdF zpP3UGDmOa-kOgBS!DzZj;LYS*n2|z_+ynrIOcR*L(IAS0z>{PbdkhLEsF0_a?5A|P zVYHQPY>i;UE?T)QI zLq4ZU@Ep0(a7E1w7`1X$a$QNvbI?UPv&+AwoL&}VjNr}0N+)Cjt_WY*Nn={U{cepT zZXy^HSCmg3c1F1cA=z(*cH_BSfMaSING+bb{VU9c+dV>BD7JvHBJOM^1{EAFkNPMo 
zk>=EBFEjwDEZlwO<@X$O$F#lv4{vF?#or6m`_2gLU0)!>)ClDn(FlgB;!Mk}WHOT;{S(fH#ebmS|~^8cnADL>BD?br3>DH@b)&afOG=U zFFr!_E_@ZgZ*<-3X;M*cCeZn-?sh&4fpxUnpM#s5?z7-m$}@4LEBypllrbP#Ry$&b zQ0|ieF7@F2JpDYdAY)|Ws_bq!09LDA33X{5eGRE7m<0u$&M+&iN_1LJXl!B(o^?G< z%hPnDp&bny*!^LKaEUgwX(mT(nB!uwq?~Iza5BNvB+xk*7bO9D;KIUpQAbxxwK(i=ihr#ZxF^Eh|Y*4W6G$ZsH49XV=PlH(T~)WA03`C z8WLG=QgO85-Abm%k;ODR$UA|yUhzxoZaOR!)ALeD~ZBAY=L&GIdhLVYe zcaABCRODmIF)Mv&e{(Bh;G^FfL3La;Pet!NA<=ogMvJT0!c}N&t=`$|Yw4ncncotK ziMO>b2YV-7%Nwj|@|jypQB6!U1cr|`O5!P;9=ne;zbGaXdJUbhvyr$ng~T9`dIcL) zFTiq}O9ujx(vcknzoN(O;Ei3uZb8EsQ&MOIEpmjv)tB!#JE6!j4VLMgyI%x#gU%8*#AD4G}edUk*siMy_ePAyQmAB`Q~BV%OS<mVIM)EWLAhf|Js4hhtBT1 zvjI6PvL!8*X8oz8DQa9#x!lV-XKlxblr5cEXO3@fHca^lzZb%^)yO)QoNzM5EF3R9 zqUxS5d?I9{q*tNvt^;pxGTcakPn2&sBQIOgX1ZO#;WA@Qs@-+9NIpab_u97l|I3`G zR2Y^4;paUJ?8^W52E$eVzu)U0#|QuaA)3_yF3~ul3|*#QN@mUb)eL>;?|n_Hgs29q zXshf1RL+YbYKq3k`Z(AU*=8JVS0^;Agd7VgWjpRhtQM*-4-whtJ)YLQWOQa2+_jLOPw9P6D0d&gyWL zYYnAKO31U`mtZh3R|wx8JRtK5-tOZ5xH(~ZORd&E<}O@PNB?jH0OgwtQ!)hr^Objtr7K)VV%t%&gW- z;K;3Yv;k|w&Q)JsHhn70sw!iXvyM^MF6zqOP zH*>;w)7*sIkK{GZs2-dfJh6O&{$z$&L*K5+Sc0xsYkc~0WtqjxG_|63zPEfDl%W4vwC>jUS;edHng3a z8ZE1O;Py6I8#ZLRVZl+OuxD+`vg&7brWDFFj;eWF)+1jTXq@h-S#{HFQVAnc!4mlS zY}6QIbdDUg9w{-#)%EN;Klm;66l6qJGDLLmTwWLkYw~T*RXRJ`z$gStjO`L zm5{Y#%Wdz-qePQd^F%w)5zk5DZ?pd2}}snR?-P$;%Zb4 zyw)jU#Qy}Of|v6b-CL4W^}gNF?#cA*|97*_Svt3VFE{Q3vJ#v3XhLmMER2q zGFIRm%uBv-X0=-%l<>Wen)030qlK@~+mO9@P zI_t;0<7{C!_MeoYH>k@aSy8_s9M#(O=ue{yzsI&!|0}`po(#aQ`rngbyZ-NFI6VCR z=RsOi|En=cBT|@|Q1l;8kw%w4_~L}h8oU<_zrX-IL`u-ALZ_xeAd)t9uI=(Ru6)z* zv5u?aT0-o*mu4i@K%Z4%j9ZDKc4Ko?;|5rv@f*>acgp{m@{IE7m#TqX@_*WI%m47W zf2jX|koFn!{~w+!zI8Pq7{14eC9YBfckD0#XehW(+m9~BzhP=ho#WA5tYk!kPl-Dp z_WrlZe;MwF#BbqCbigk8?+;H~^*?d%Q2+BVZDYYqi+jJ_=fAIgdVl=3OaAX81lX1T ziQ|*C-~XH*?EizbyX5~H7W^tJanyds+4MxU z_Tts9%I}<)KP~UvK_o!)C+HjKs|L3`VCs!8_IuO-9MtId-CFuTnjy>SlnaITx5m!= z&v9G-502x(LH|EQdkiCD3@ZP-M}PCa_Iq4VX5&1NB)ue4)b;-dqYLSjD(D$922&=+ zzLtve>2pxXh@o?IPvnh$!n5EpaGd%w-X~JwgkEFjAFcoLG=g`WEkJNL(~<-d-On~L 
z3ZhpRA1;hg7(9mZ=Rv=mT|h>a4x%YFUH7jiKZwTvP+j+L)iIlP?LXC5J?GseqA^J? z3+W2rb?_|GS2B1Ojmc&3EHb(MEEJsv&;BoX48IUXMWNx%tJgY+q!Kpp3nH2!>3VP_ zehZ?xPDO^@KZ0Iu$-g^<+6%NBdVu_IuYbCh|2^#;RV|2K|wg#cCn0Q@%v6951J literal 0 HcmV?d00001 diff --git a/assets/rancher-tracing/rancher-tracing-1.20.100.tgz b/assets/rancher-tracing/rancher-tracing-1.20.100.tgz new file mode 100755 index 0000000000000000000000000000000000000000..82cc6e7febd0374e0a2cf8a34a73c1341b4d765d GIT binary patch literal 3691 zcmV-x4wUg9iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PK8ibK5wQa6a=_^rK{JXIv2_IkqyRs^_XSwkqQr$I(fi>}9vM zToBokFa`kz0A*`DzTbWYfTT!@dWqxp-lU8Vwgfi18;$NpHyV^AJeZ?wYDoghXRVVt zk=kF9h`o5U)^4}k-J>J>zuj(E|F_?E4qtSR+DAvn-NWwNqZjSY(c$sY3ur$c3!7V_ zG?6dbUrnnzxj#uGoC{4f6S0<#{o)Brxe2;q`mcUoJ6Q%5?~Lg(o{6nQfZ9pfh95|3Y%t7jyKa{kHqvNN<{_l z!J^}lIL^Oz{KK~2_Ci#Fq_MVrefXTv;2Ml7X!uWpGn8=YZ~^5?6UMBJAW^1E&k+b? zP0E{sBZL?U-vhXuQw3B37aC$I!X&U`J|~3P`TH=!DN4`+=32+9*J|C|-1xs)dMWE> zZZH)RLezvZ<$LurT$JY1X}7(Ht$)@27lb9K9@hcvu>a2SQMYRU$A|4_`+tfSOR=CK z${sj1^a8;(-s&OI^By!B9v2~wkzt^P?7`i=7mKjZHSJHQlvBMtClh2VfPeh63UG-s zS_d*({b7Mp(hx13X~NiAr#E9_LRjjVG(+lNBCjp67&b9!rdcqFR^eVp$eezK&cE zj^9x)|9ZlRQWw@8+NCBzfJ(g=Mp;Hk!oNwX@jc;755P^(aT1Z+DlBnX~!9{TZx>oL}`>~Lk|FNHA+sH zL)z(ff9O8YH$O$I`hSg4%!o$Sx+19o`#S!*y>m|Q0llpHlG6On`>P_01l0Ap=GOzA9<);3y1DLUiLScFZpSG3@ub${>O-8GFb zq#U7Py0hJ&0r%FD3SBGbK_!N#iKKis4(1ppjPlvpjElT`aEn1=V>Ua+5TQI^Nm!vW z+JLM9zM~HiOB0Jd7Qx{rY$-)E5P4`mZWPUCrc;)k*s^WK?5nw}Z9+tJFdy8;5|zrL z`Rv&>E_*ORP5kn*n%yl20}wGvVmGWaUg&lq;R>JOUlKk;cy*1-*YIjV81()>_#a*D z==N;iAYXBwOK%D@4d2-@e*oqBduW%Tns9e#F~cA5IT0Gyk))E`(eO`*S5#wEw8DKBwF#`0 zf9-Et#u^Moe~Zh?VY04Tr|&?V#gzfptGaa7h3TLd*b z%VO_+rKVGhSTHeL{!)@op?AAtY}1Ba{`ev4ux2YOl`xP6sFnOT=*l*2*+XBxKx6jM znL4d`fZpt_IXiS`gZ9*R@2x?QZ>0r-gY~c{IA=6 zKL3B3R&+@2SnVo}hp%LMu-34FZzY&WqrbEWP_-}nlj)S+LZi9PtK;uPBGCvWr!h?!Tdqn4K;qbP?t6bjhuy+!L&MBM!2|=6 zCot6$-j{ zcV&gP-44pen`tCR;@H7v;cQx+%3`89N~&#Pponnn&g@q?tnFSWY58@$*J++n@EP 
zFqR@hJ;%fzrKmuX7z_3PI-uCrIvSm$!|sobs&jWmE0>z&e7PVNwD?3KwgdDJFzF80 z@Ixi^wbM!$)U?!1qby~zPGi&Z<|;O~<*gnx3>WEYZV7>mN*7izWpq`ngZum5`g2WY zaDU&ZQZ3dY3xm>-r~*>XDUdI!sWP1cey&okmjCi1UnL`U!uhFx`ff0~8Vr9PydRAE z=j*A~x|*y;kB9w>tMTQBQUBfG>hjZYP&+(jU^Dg8v(ez>^26xU)yMw%uk}b;4ok~e z+g_-C8GPD`=o*)eU8evU@{oLyY~e0e#%8h#jE){^+~sMD$A@%`Yv z;p~0?zppMw{gc7CcJ#V3xss;8s%BPHE5rX^zYa#9u72u|2Uo-X<KnbQ}(&m@jKMzPJr0nK4?{YT}9P6nZgr^ zB&3gx{wwWjVT;`FMZ$IEj3QekqresVkpXx2E>i8R`nqhwdDXq%)8+tMo$LxK_phBp zC`P8Wqsv8;(N-iy&PnNLdP^zI_}UyF&H=3jBj_1reK&G}!f;zzm$ zxFi32b9{VUJ^yPTKIeZ=()M@+_#h`Hk-}Tl&wTaqUS==-)&yvU;kip zpIjIt!SE-_L&|60k|d;HIKrvD5f{^UQY7&fad-eLX?)4#RWdQ@sGU|5YSqiIx;MTD zF5ow{atSj5Kr>&rRJgItD+~8GEtl@uUXv}|`?RIREOfG{DJl5++Ey~4E*DdH0nlL=*1 z(`W|Cnaeeb>%2G1OhS1g3ePD|Zp#QQvcA3VtCBco7$Mh$t#tkI zBvc5en|ZuyzbJcQ+!;b$U|fh||J|E?P1_RxErO?s|JvR4`ft1aT>pKN_P|syb=S4} zUWpXfJ@{Hq_}KnBKjdF~Z(#jYtqS8PtzIoi$u(<+u7GN`#r4-A@|PO!?)DOQ3jeQ7 zgg4uNdWP``K46FaAHF?)yLSJh`)vPD(zc~_HKv=rez>p2u2rIT-73zsj+9G9ZfJIO zy0_^Xbm`Rb-xJ-r7lI0;vKg5NkY3i6I(I9)>X$NjUMlIi!|58Z;_F7mi_}7kK(HQM zo(!|Qi$u;)7hg-NmO-~k$;9t3vv*@@lm=VMUM2aveVq#W!!sW)g^k4wH|mDOG37H5 ziMDX5_P~S$*T}=yaAR+g0Le7YN+V41`CHE%YLwHx)%Bp=q=Miv72;7=+i~XSZ>tq< zq!w0n=YTI;?k^O7x}gVxIU@hZ)3Eh7_wto$ZkSPXu667|saEuZw`N;;Z`BGn;+@Gj zDC0eD*Wpcjt9bWc);0fM-T(bLvWVIn^N+U2?27-7-&D_kkB*-A|EFm$VMw&uK@?~K zZsFl(jvOWlWufubF$u2843+P_bU$&B`ga9tj*P*KiHW;GL;38rF+QT{0&SJG&`)^i zy#$Uk_e%=j#S*9V7DKq9dJg~Zdmlbdn;zj^*b1tP@OpP^_~wg1te zfBJslN8uyV*qQ%#s`>w$?sNYCG%XA1Jg*NUrwi$I-w9C|g5WlCOXI%6uLHPWOey_1 zim*SY;yWkdkl4Gz_P`UCBC&UnrP=oh4n-K#DXbQoiT?2OJz+ z2#vji1Gt={VTuMtHnM=U$j8l`2J`HPSXNi6r$QEfmc@r(723J|qx^#}|NL&-kF$Ia zp$HOt&S+_A0U>IX5#^W>F7je#ZH2%wWnn3jA|LjF^8XCJ;<-Jy=k{&2{|x{D|No(6 J4L|@+004UVK8OGS literal 0 HcmV?d00001 diff --git a/assets/rancher-webhook/rancher-webhook-0.1.000.tgz b/assets/rancher-webhook/rancher-webhook-0.1.000.tgz new file mode 100755 index 0000000000000000000000000000000000000000..27a271e90f2a0bb1d10cf87788d43230f6306d3b GIT binary patch literal 1210 
zcmV;r1V#HFiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PI=KZsRr(&RI_}Fi`BtvLwq&t6Q2)ivn$6I7NC4XlZ1#p-7da zymbw|d!b+3vK$ASz(#iUT`Z5t8H(gL9L`9>gC)x07o0DJcplv@iPYwr#O&(bU|E*s zxo+`mSyubknmFE-?OLuo@f`2ky|QfA@w_Xr-fsxUkt$8(mGx#@?T`B)Ae;+LG!Hn*!e~e| z<%^pzrYdjuSE&dhAz@Z32fa>E8Qlg~d+IIAxb&*N9&P@w2uo4DuLC&6f5#rT`9HBI zJ^#-E8j}U)%$Jx5MYWLYW`B>``y4=%g%7KeMF2(TY0B7CFdD3V_$|5@dMZ&N*M_lR zVoq4)Rx~Tj0F?YM3JUyrZ*^ZzW^ zjg?7~sL{$co+%G~_}r|&5u+v{(Ih{>yCYzgV35C6$S}}CmPdU|bg=xL%#o>50J)9B z-T?KstNQKO%fa?k9905P4CNq3^9G;@gMX1C z_IHE;5k(fxFxr<^@+r|vzZtvPTz8tbWvm6CHk;;qUvRo6WZ7oZ_{*ZRxsB2w%lw1a znk*p8J~bA*7i(`kyW!j5Kw+y1WZ9tUo(QSc;dCywPSno)H0v-GQv2|e>yE3p3gwCh zcoPI7<@&yJte57;w~pifU(QMJKKH;=@t-?(+VP+5^!cB&VAuZ_D^60q6q5c|01j+5s)JHCBwb16qhRLZMl)MjJO%6yV}UrV#>JMlcn`CobGcW?ox?|<$5kLOK#{+|VV zwAs&`+OMGY9+CF5x^*npgbBmq(T6$!&_pg!x1xeX3M~S`eE9QrYHVYeI;z+YFgpXm zOHb*C!b$#<(v`l&4V>b?ZMEt@j^&J96SVwK&fIN^*wt-XO)ELz5;CPrB#{qR`7@}(z5w+XR$5c8r9Y+;`YYq3|ADtRD-Dk zhLv6Y%u$p4IbamIE-hB&q|#T_d5D)X9xoK2i_oLU-;7rFXr~xje5&BaHsw(VFcG0F zmDrTN7F@{uqEaNiYU$lSDP0kUhj)>k=>SkP#Jeae?lN#Mc&Dc zVQyr3R8em|NM&qo0PK8wSK~Oc`26ipQNO_c0)1j9;nh9aWzS`Snc;3fxP+bAJ?107YC!A5m1!Y5DPNc$QaYunM5{bS>P>eAr z(;i9bk0>PNtOMh=N7A(Tw-vO49WO!|3YseOsEbNMmco<mEpLAPm+JqR#2LyjVgOd?|JK&t)~x>TY`y6Jv$Qoh!U4%* z1$Mzb zx8e$&PM#7qIu$seHyx;joy*Hwc?g6qiZKoGoz-kmBcgIOm${OAvI5ir4*;3wDC&SV zhMu>kaV9Be$dm*b3XSP6s4#6Lm~b&^gxpzXH#27_c_u=Hm?l(dpC;TB>4r97AB>+c;w{n4i<>qM ziija9qf3QU(@V+GR40L7?b#30AW4QF}`#UYaj7q>jsh%}wY zH&iNl=N9@z3n`ptaSUAyg?5<(qfrQM5rBjt&zRDPBIsLzb4rj)x$f_}n;F)sAo$HB zm;UnX|K1k!KdUYE|AlpK_C^?|C}g10r-cG5{Qu6*?u`H6YVGd5`2S~VYjEZwa_{!m z*A{>>gqk&+J!cSl!nxWS_;+^(kPaaD(T0fDRpdr9I?6FF0wZJeb!4>R4BXNCu7=fyUmMxYz&ja$OH(Wo=XInR zP}yfx!8MIzZO4tZBs$L0#5-!wwdtHj9F*BXEr!r>9Ys`|ZxT-pr*-9clWJd~$%{D~ zqRjh5c66z9k|XUfIfQv*x(~H+&pVJvPXLPmZki7nOT{cmOvWg5T&(q@_Lp|h-tk*C2o6a~`ZT7B zq71Z3x#T2nFhAI&dy07`4T{Kf$jVSB1VWn8CUH07`@( zzd!if+vB6&;fJ?xj}Om3oOLVouf{bqen}b@k}|$uzzTrJ%hi~~G$IP8Iqw6!^rBR` z=(V1j|MeT_?8C+*P<+yv9iBih#{BembAJs|!M&9+HG(x+|) 
zuG;_K-PxXr|J$v-*Dvw^bF{?<3-0c2Z|||G+Ca^0re9GOb=(%)JCb?{DiRSzI>koz z17jTDZox~}&z%RX?Cjz=6D0xIAj@>4ER3`0oM`M)1 z_c8B7&C%6-Roe^>q~#Pof$O8|+*^a&k<-!zjA^w>y=m#LJr`DPc|LF6Yg)uEQ|lm%9aW;Mv*2&M-*aCg^P3Z}?#O0%XqNr+aK zi&}auN3{mj{DJ-%cAB|&G8FsBl$ZpmY}Y;(y)|D2yHkcoN~Oia(#)Kiv#4ju<32(Eszh95%!&4rbeo7_WyS6B z;Q|tJzy+)MWI=pW#*$C=;lcU&+vDEp+3|NL|LmO}oc~Z-uIaWzzWZp;tRIuR&nMnr zR2_Z`MR2sL@j4*APuqVRPoK)SZMQ46l5{v-=w7 zt5UUj@@JgNGpzMf7siq{w<7a2X&Es{N|@7?1grCi_v_zOH%s1 z;8`j=a8cJRBUA2sTgP+#WA%%%i@M@h$OMilUQ6?KjG}Me*yHs2$3=)|=W-se%=*7z zs7M&1l#jxn@|cE`d5kHKB!-!wYGObrRuJlTQO_h+dYU70-TQ&7sLApD&k&%$Q5I1) z{3_*$$9RSVO>w>;_i*n4%xlkMd391^q}O8WR`yRmd-4{1D{*%>x5ULl&W!#QF5;53 zbM{k44Z7g-#-&NtT>t5?Gp7r4lWX<4Id1n+k$j?0dDL~vd}`i*jdPvvP^N;8X^caR zIxrxy)Ow(!;Pp-grcN22cfxX?`?OhLGAgxXKLg)mN z=mU!S1NM1evH%~M40}{vl8lxZFBh$w>Nk{&r{jzN70Z>Ku zQ-uvXi*xsvNdHT<2gLtwxBclUz>4_4-EPgE|Fw2^w_oD_=V;EA5jMB6@04rt%atDj zI4A#Adz}2dq_opxbYyyiBfqipoTntI;EIPl?!fussk`hWhNz13n&qA4gHHGE4SQa! z?tfN$xcztcpf;8_K53EPw11zWVU4n3L8skP%4g-G*VDB1dijZyz#-mrpnjdu-bV0hvtB$m z=N5^%{ZP-Zkgm7$_i9RZ2Xds38ZyV3Ncn5rQ-Ux_Li3$UPyZfcPjN-!d+Ax)_X%X$ z)U5%GhBK7vdHJ*cQG70w5_bonzHp!jO{`|$Si(9v2aEVwZ?A7XY3@SV%P--kf`=$2 z7jof@*+g|AwMqWHf>gkNPN7EHh#YZwdE#W+|tU@bgQV|y7A2xPc>i>fg#t9uVF7R{5So!_;_U>%`hwYd8FVEA~;FKtZ!ro)I z>#Z{r2K|i25oJSgbp>Qm{Xu4_$&EmcFpgmu^S*UBlnon3E44{88#$%tghk#OFdW)T zfgAP?21amA)d>FIW&j^pJOR#3PfZe}C?KW`11~u0_PUA-G-Vu;gfsZ@unQ3t(hG)E zHO)^;z8CcWEt=+M{$Mn0>L2;Pa?F}jM12xoWvQuOA-z|DyiUDWL7!ZCuL70mzqz18 z@74c$Yw#lxlxGr7j*g`lq=Nr~q4EM6A!%B0!GH0Bu?%^H%|C*EuWC#6|LpkS=-qLU zM4vy#3jN>S-I>+@-QBId7yW;Zwzj;Ukav*Z4R;re`$S>{oS7TM7x&ffD3E;Icxu&; zO>pXEEX7!>T|9x1#5#YtPN+WxG`EwbtJXxOUy=hKIhw;WlUq$OpJg$crvib*k{7qI&1LiA z0{DUoEgBDrB72QpV913r>E<+B!zUz#pYy5z86*md(pqFSLMq@x*C#{9rJ^C2^EwnM z5z5r;S%ddHLO8QKUx_K`;dLnsv6#S!qqLPqtj{PN&+=&wHS)#z5y4@D*CJ%ZDFU)hUO&9OCFgD z1$l)4^Ua!54W}8?NtrIc1+$DafW({)C5?1{kuj#HCFJQuYZ(D86g13YBEWqpsGnba zL3^N=NAPS|BF%B2g}g$l1E?xEl|cG2&Wb=}K3t0@Q?9FWv{{0jZ!n^d4QxNH9(rUZpCHI;m*hW?}t!5D@5PVmJ4Z^@Y}3WzEbY>uRj 
z@=!t77{~l&AhW)$g|pG#-1JOkl%EJyplBRVEo$!PoSDbS*mVUl5~*Npc(&6wotZhl zuSYaS*rfjG%2W~)_G`pkNRYWMi8zZLROK7*IYj+Q)cb-?J{s(|XgYInR>yV=A?CS>LGZcekxOtHM*{o1T|@ z8)Ng0NP_RDu5TI9i*7%zt<-;a+46J7SfT$r^Yx!!xA%5l^#3{9%J{Dc+3dA0zqsh- zLbj9$lE6ms`RRpufC|ONJHjGjZYA!+y7}U?0p^1M9ipAm6l2QtQ0q!$xcLz-Y|!;_ z{WQPow&|H)+6bMaRK5xO?9X*uz3Fy(opJyhzNs&@5Y+yCK1mUZ+nSr@{<^I}7$dP; zHj*Un3}ZlJtbmJC``aV!!Q#P!T*kn5UWF$CKpcZ*~%yQkSYIt7RLq3JFj#Z zS4XQ}>FImCb^|&5`X9qau@aRrOZe|ft9LF4lLHj~cbb{UE#2($bVb;Bbf+V!RxGJu zDn>HM`8krRyg=)bR6h_oLgB(o&5C|df}a9Q5#9^j6=6kmrq}}uhQF4Om}j%nQd%s# zV5MbSXIUJ>J{22Q*46&eCM~Sccr~kv+3sk zFSc{Na-q6?sE3`)`ZQpfzj9y1EtPIdfW12#pd3+=18&rBTX&0Qzy>U(qH>k2+4fsF zOVeb<4YTPjTj(;>7g*MUK)p_98MEtP5-JxE%Eb}k`g|$G{YTVYuX*bUxjDmMGnC47 z-Tmw+v*pYW$oI*9 zAACRDj~t2vF?9WXKP))Qzp~rEZQX(ID~vHgoxYJM68oLocJ*mOZhYam*}vVYzSMfS z-*h|Qmb$r_Y?LW(RAy(>n+x4T5E;|Kdm+@;1Ni9XqcSDc zVQyr3R8em|NM&qo0PH<$bK5wQ^O?V*PxhAWtVPMs%xcfI=Z!OoYx8i~nVH*)QxQbA zB&_t=zfjK zZ+i-3nf{^s#dFOQ_nkaUN@bblN}65-kTA=H8pR2-R$$CkCqZrbkYn0|AS(;INtU6> zS%N+Ajb5V6)L6_Gj!>ot=Uk@h+$?;rQ6MweCdk#?O*xHN&VNB|xRO1XY(^~4t3SK( zM!XTFXcEnH>zZCO$wp|v59e|Oc8r9B0fTyi8YX@;FquN4fo@T*53q)O358ROD2E>W9a)CSBL7d>El!etusDasMk|78k;aiVBRJYg)n*^F)? 
z)J}8io9BWj4xwl=BeWZDbi2|0_`g{HCrlJ*zLo*FMgMmXe|Kl+QU4#}S%L!` zvO-w!Jcpsubr~f2ZsSF+v_&09OYm;Y4R8Yt{<;79RePv(#;nB@hFl1%vXU1R(Tw3L|$6}TXX3@k^v;@!U>J~*Kp$1F@lVkOM|&G+$vhFz}-&u(y~zxCf;Du z-(;Cvf%mkseyMu!yL+4BpcwUF$b>=HAh@w8E4UvkxVcsr(`Nfe*XG`ykGaKH+*nVh z>z<%-yfbARKpQ@NDit5d@2v~K-k zyud$v_Ib^^((o`?WW0t!?T-hU{j3->&c_O9A5IFbQCcwtKOy;vWA_`-vzuP`i zMbk-T!4x^!0>2>!Sun7Q@hAp6RtCv{Cu1113AzE!p1ZvuB}ph*pF8#vqwivP*J%2% z?+yg5Eo3v=X@||(R4A5)4V!!sX)F^0H54*M+I1Bzdjp@awX7Jo(Z;j^FD=253ot+< zOC9Lr=lch*pT}7W100f%VD;2R2?|hpbZA1A+YlqH10RBR29Ip3{%c5 zifOrkjyi&oP=M|C?-_lupC#RMd#mH;MDY}uoI)YFv?yt2CTK@C_*;Q8sdzGET)62~ zxgw4xrTK_UCTQlc3NsA71X(p(tJ04dD`VHfGQE3saNL8R4T2x0L7hv`+?>Vm5;YRD zQq4w6nmBfAJ0pp#N1$rvGAzNb<0+hH{A0P_{LQu2TS(3w{!&z0zKErsn2b%GW5=dW z6bA^pkab$t!El-Q$+;!q4lt6Nmtro|lw4Eq*3D^#b9RMMP#6-1Bds#DV=PSDdwym2 zOi6OEsE_B!u@`iBJ$v!X#+pN6f96DbVZDJuG)|2*{#I~}Y2~T@qcUa{rd9Wai9SBx;#nO3SD{(w;}}Jb+Qc@$cPikP_<#FK{J+`lZgd~x|A%;%N;4Hj zpFXw8Sa6BZ@`3ipU^_nQ5NfxeeR=6R^5Hd>WMPP24a+r`b_lIMna*{l=w<*Y(<{G6 zOYj0EY9@$?&XeewMrILMW&4JO5L5WAV8W#`Y0nOxz^}B+7cz1EE$nW&t=Yf;6IK`m zqq2snrBN{CC{k~h1wNI$g!+=vH#sJ2adbF@kka%(svzC_0BS`)*9*vRp}&mRz5@H<1LY)077{L8DQHCLh7=l6aB3l*AZ8C^^GggHlrwVqr+FLt& zo4axJq!XA}N*o!WpM(90X;Lq1%`|9(>o<;ex#*d=mVyu;dltlR%0878R-@idP-~vz zRXMwMM!^OsjGyl8N7H6`?!uwcjxHoGhRe$+h=l9iK#pF9NJIBv7P;IJqX`o{Wfo`K z`5kn3p%+k&TCREWy**g{B1dAaj?60gY^!5tPKqIQcb-OVy0uwv@^WFdv?8B@LRJ{- zmW@kT>8y~qxN>^BvJOL4$Q1r$nls*LZ|=qc`s1ZCmf)3Z|8<s3eK zw>e77aCzA#|40VE$PWzH4Q?JC{J>>D%=>9>%wnobM{XH@2gzlM(!w^4Eitan!rD-6 zjK0l{sSl}A=y7f~Xao&dj(_3;P5c5awEW!!)2a1qT>&DM%5Q~GcA6jFZnrDbywEb= zrvE9!Oi6=ZavSIt{omQz+_<9uTN{u1{}9h-X*0`n)2USYz=_5Q+AmP)ySk1wq~3oh z1D$xwGhp;UNX&E{v7uuU>!1o>zym{xV1t@U;m#0)KDCax=xV z%OWkf(gc^6y?evAY*c}?Y&2U5T8k`WWLDm{@^dq8twXClq@PKzb7hzNPz{?XQDpd< zmcMa~?UylZXzc)?dJmA%2!Cunn;^XBlG|t3vZk4l`Vt;px3>3Rpt>V**EMqVbwM@CpFs8x-%NEj+@?lJ8GzN7 z|DN&JTvQQwAeFt&iL6q$t!M7Sd38m>pIj7DpfSKKkSbbGvb>)Oe0FUTxLgFk)s)I_ z?NEPb&u#Ia9S(}h5H&6?%>g;qswdhmWFSAr+g74nm+gHqp~8cYh! 
z?bLPQXk4cW>VfGwyZL^lFxVLB&J%m^{`y!dtia5>_~7j5{Tc9gE>kW?-xD8{z+)T| zq9rG8AUXoLDs-PkqA3Rd!o)h(dr&8hfbEZ7u;p^IT$mD_!{z&7<2*%Pkvfx&0a{j4 zccjvjphZXS{dYuQnJUROpKyVr`sGD(J>Q@`SNg2kczE=z*>H4tFl*3-+&4cP44ERb zNIh6Nrq+EqU2&3ZZbeOy4a9vQ)1ohcLL3}mS^cfHr!MQF=|HnOx{l9pOBI6^eG=7CSZae?m z+1j~M|Mz(Q_b|^A95HKAOIMcQSq|r8lrSi`a7D5?OU~E`O;!CDN&^g-F$w`Ap$0zn z&E;qvGzw<<1f3?-+L=tFC6GAsm%>*4Z5vGeE%^Ufi-C-Wi~Vws66B~saEWmgADnzV zu}Y(>6wERu;g@G8kaBIJc*JeT{q*QZ@!;RO<9?P6<57oxlz*Cu?92cSSaMe6u71%( zPhxYPM^EAbJByygHlyE_=A-Dz|3pjh3)5T`1`ZFNn<&n;BKIPSd5Wy#yKD8YD4v)^ zrP%o%EylTjBy7U>F-NRV(mDpDopP^HcL3KNmy&S2(du+jco; z+g*ch&Zu-vqWUmq1_p?7zT9ig)5}fGMl7;ZYUu0UyMKGj+UXY3fTOa);;;-a8YZlr>hf+-*!{b>NV8oV5DZ zOMS!f%i|Dp1OyU4td6bCO|R1#aXT&saiX&7;IjO8Ak?6fG3wt5?r|sFukBQb#JSm6 zi=*hJIw#Y<4k_m7Qg)@NHo8`Ptk&4cpYz3uS0*pW?lo;zKzcnCHRj3~?h93elo(7D z+40%3Qa`E47+9F1jiHi^8~eKbOQRDRvKhV#*7(gfQ8M8Idt}3w;5Cy46Y#UMeZg#n za!%@+W!_}5JlBd;ihKo;IzHGr*&&bhxJ5 zG3*aXo`o8XJVOWqA>*q>X~EqlEzuj-R6*v*a>cyPsLz9pqIyC4+*?`zp%SMvq~J;K z+Kt!KOBf_h@@X4t+U$I`J%(EWE_h2(T0Wcxr^mNb_ykOk3TNCKA7Qy+mt5_)+oc8a40hkoy9*JCwR#sdMjNSLp6N z%^dYm>8vW>?~lpR3BdYaw;2AjE8$~jBNtKdP8Zl;ZxSnn=giCm2u{W{gaY25ZtnZ# zR}K6p44h^*s%83K*=C=fz~K-R%O|wrR`cYn+~WE*Kd0{%z4U=kItMCVu_vYE_I*E1uO)oL!Jozk}Aio32HtJpDn9)K95G-9C=ur zl$@7=o+eFU;Q28GcPAZ^F(zlG$V>wm;aEjJ#CIEi>)85xcoye>Sf01ZJO**e;)HckNKa+{Lf?l=MT&O0CT~%$JVd60}?)dkisgD zarVKa&3Gq%`scO(-&vzWccHe5xD4XlH_C?Pmcyn02y1Y!$LicC56DcKdK|XMAztkN z%@Rw1+IO7_x=M^AC}44aRApM_YWV!>N_;H z?HnopbJ>5V=hLhLZwRYx_a&i)UH$Lxra3|tHm%jigZtKYB9M7M(vbJ*!ki%&*ywhQ znVMkx&|(JdV@wKd5ZXUcbkdx%wzzQU?^gW|ZwuRgQ@H;~z(1E!^{Yl(Xc^U7-{5(e zD8Q#b92pfS8gtg*U)H$yPBUqSsM|jgF`vh}dG?&>Wc{-T9Tqw!7TK`#RqmvsupXj6^Iy308?}oqCfVj4KX{j;GGpxq8WmyOT zd+7jjq#&YRBH;4UY6xBbowFfIn?{p<{_@SKiuGSxxI5me|(>c0~mTUJ= z%g(DSpSrgPCo9k%HolQHL+eVRVTIL#*%l5U${V;HaAMp^E_hRw=Kwaw5R0WhO1gC* zv|k|`M$x=gThRT0>!_hV&?fG+lHO^E464skx1fDs>{TqUn}%ReaCRjL(^@p+RT^lr zYhj!&TAPJ{2}IcPL-wb28|=_6Fzsz&VGqhxSdc2(yNbI@7VWi)9NOOdaDVq#%9F>v zIUbaGsg+g~{i?ae_DfZ+_1nTmUK~qo+V0{hwBjeA^8}C#aSsxT;P&8W$tH{o_etjz 
zO#*vhX;oazO3{)~hRac_6yw@-t&<|57}0-GGN?!D|NjZR;1;^krMnOAuzAIdeSX31 zGn%(85xR>&9y>dNsRz&)R0G^-50l&qtm_YxQ=Yn`8I1|@2?Ac^m{9nx3Y!-ehFWD% k@:: + +# comma separated list of domains or ip addresses that will not use the proxy +noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local + +nodeSelector: + kubernetes.io/os: linux + +tolerations: + - key: cattle.io/os + operator: "Equal" + value: "linux" + effect: NoSchedule diff --git a/charts/fleet/fleet/0.3.500/templates/_helpers.tpl b/charts/fleet/fleet/0.3.500/templates/_helpers.tpl new file mode 100755 index 000000000..f652b5643 --- /dev/null +++ b/charts/fleet/fleet/0.3.500/templates/_helpers.tpl @@ -0,0 +1,7 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/charts/fleet/fleet/0.3.500/templates/configmap.yaml b/charts/fleet/fleet/0.3.500/templates/configmap.yaml new file mode 100755 index 000000000..c546c4b97 --- /dev/null +++ b/charts/fleet/fleet/0.3.500/templates/configmap.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: fleet-controller +data: + config: | + { + "agentImage": "{{ template "system_default_registry" . 
}}{{.Values.agentImage.repository}}:{{.Values.agentImage.tag}}", + "agentImagePullPolicy": "{{ .Values.agentImage.imagePullPolicy }}", + "apiServerURL": "{{.Values.apiServerURL}}", + "apiServerCA": "{{b64enc .Values.apiServerCA}}", + "agentCheckinInterval": "{{.Values.agentCheckinInterval}}", + "ignoreClusterRegistrationLabels": {{.Values.ignoreClusterRegistrationLabels}}, + "bootstrap": { + "paths": "{{.Values.bootstrap.paths}}", + "repo": "{{.Values.bootstrap.repo}}", + "secret": "{{.Values.bootstrap.secret}}", + "branch": "{{.Values.bootstrap.branch}}", + "namespace": "{{.Values.bootstrap.namespace}}", + }, + "webhookReceiverURL": "{{.Values.webhookReceiverURL}}", + "githubURLPrefix": "{{.Values.githubURLPrefix}}" + } diff --git a/charts/fleet/fleet/0.3.500/templates/deployment.yaml b/charts/fleet/fleet/0.3.500/templates/deployment.yaml new file mode 100755 index 000000000..3319aa0f3 --- /dev/null +++ b/charts/fleet/fleet/0.3.500/templates/deployment.yaml @@ -0,0 +1,39 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: fleet-controller +spec: + selector: + matchLabels: + app: fleet-controller + template: + metadata: + labels: + app: fleet-controller + spec: + containers: + - env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- if .Values.proxy }} + - name: HTTP_PROXY + value: {{ .Values.proxy }} + - name: HTTPS_PROXY + value: {{ .Values.proxy }} + - name: NO_PROXY + value: {{ .Values.noProxy }} + {{- end }} + image: '{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}' + name: fleet-controller + imagePullPolicy: "{{ .Values.image.imagePullPolicy }}" + serviceAccountName: fleet-controller + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/charts/fleet/fleet/0.3.500/templates/rbac.yaml b/charts/fleet/fleet/0.3.500/templates/rbac.yaml new file mode 100755 index 000000000..59df51b1f --- /dev/null +++ b/charts/fleet/fleet/0.3.500/templates/rbac.yaml @@ -0,0 +1,106 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: fleet-controller +rules: +- apiGroups: + - gitjob.cattle.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - fleet.cattle.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - "" + resources: + - namespaces + - serviceaccounts + verbs: + - '*' +- apiGroups: + - "" + resources: + - secrets + - configmaps + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + - roles + - rolebindings + verbs: + - '*' + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: fleet-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: fleet-controller +subjects: +- kind: ServiceAccount + name: fleet-controller + namespace: {{.Release.Namespace}} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: fleet-controller +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - '*' + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: fleet-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: fleet-controller +subjects: +- kind: ServiceAccount + name: fleet-controller + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: fleet-controller-bootstrap +rules: +- apiGroups: + - '*' + resources: + - '*' + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: fleet-controller-bootstrap +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: fleet-controller-bootstrap +subjects: +- kind: ServiceAccount + name: 
fleet-controller-bootstrap + namespace: {{.Release.Namespace}} diff --git a/charts/fleet/fleet/0.3.500/templates/serviceaccount.yaml b/charts/fleet/fleet/0.3.500/templates/serviceaccount.yaml new file mode 100755 index 000000000..bd99d9958 --- /dev/null +++ b/charts/fleet/fleet/0.3.500/templates/serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: fleet-controller + +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: fleet-controller-bootstrap diff --git a/charts/fleet/fleet/0.3.500/values.yaml b/charts/fleet/fleet/0.3.500/values.yaml new file mode 100755 index 000000000..c07d7bd72 --- /dev/null +++ b/charts/fleet/fleet/0.3.500/values.yaml @@ -0,0 +1,53 @@ +image: + repository: rancher/fleet + tag: v0.3.5 + imagePullPolicy: IfNotPresent + +agentImage: + repository: rancher/fleet-agent + tag: v0.3.5 + imagePullPolicy: IfNotPresent + +# For cluster registration the public URL of the Kubernetes API server must be set here +# Example: https://example.com:6443 +apiServerURL: "" + +# For cluster registration the pem encoded value of the CA of the Kubernetes API server must be set here +# If left empty it is assumed this Kubernetes API TLS is signed by a well known CA. +apiServerCA: "" + +# A duration string for how often agents should report a heartbeat +agentCheckinInterval: "15m" + +# Whether you want to allow cluster upon registration to specify their labels. +ignoreClusterRegistrationLabels: false + +# http[s] proxy server +# proxy: http://@:: + +# comma separated list of domains or ip addresses that will not use the proxy +noProxy: 127.0.0.0/8,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,.svc,.cluster.local + +bootstrap: + # The namespace that will be autocreated and the local cluster will be registered in + namespace: fleet-local + # A repo to add at install time that will deploy to the local cluster. This allows + # one to fully bootstrap fleet, it's configuration and all it's downstream clusters + # in one shot. 
+ repo: "" + secret: "" + branch: master + paths: "" + +global: + cattle: + systemDefaultRegistry: "" + +nodeSelector: + kubernetes.io/os: linux + +tolerations: + - key: cattle.io/os + operator: "Equal" + value: "linux" + effect: NoSchedule \ No newline at end of file diff --git a/charts/longhorn/longhorn-crd/1.1.100/Chart.yaml b/charts/longhorn/longhorn-crd/1.1.100/Chart.yaml new file mode 100755 index 000000000..4f35c7f65 --- /dev/null +++ b/charts/longhorn/longhorn-crd/1.1.100/Chart.yaml @@ -0,0 +1,10 @@ +annotations: + catalog.cattle.io/certified: rancher + catalog.cattle.io/hidden: "true" + catalog.cattle.io/namespace: longhorn-system + catalog.cattle.io/release-name: longhorn-crd +apiVersion: v1 +description: Installs the CRDs for longhorn. +name: longhorn-crd +type: application +version: 1.1.100 diff --git a/charts/longhorn/longhorn-crd/1.1.100/README.md b/charts/longhorn/longhorn-crd/1.1.100/README.md new file mode 100755 index 000000000..d9f7f14b3 --- /dev/null +++ b/charts/longhorn/longhorn-crd/1.1.100/README.md @@ -0,0 +1,2 @@ +# longhorn-crd +A Rancher chart that installs the CRDs used by longhorn. 
diff --git a/charts/longhorn/longhorn-crd/1.1.100/templates/crds.yaml b/charts/longhorn/longhorn-crd/1.1.100/templates/crds.yaml new file mode 100755 index 000000000..8d33bf24d --- /dev/null +++ b/charts/longhorn/longhorn-crd/1.1.100/templates/crds.yaml @@ -0,0 +1,524 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/name: longhorn + helm.sh/chart: longhorn-1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.1.1 + longhorn-manager: Engine + name: engines.longhorn.io +spec: + group: longhorn.io + names: + kind: Engine + listKind: EngineList + plural: engines + shortNames: + - lhe + singular: engine + scope: Namespaced + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: State + type: string + description: The current state of the engine + jsonPath: .status.currentState + - name: Node + type: string + description: The node that the engine is on + jsonPath: .spec.nodeID + - name: InstanceManager + type: string + description: The instance manager of the engine + jsonPath: .status.instanceManagerName + - name: Image + type: string + description: The current image of the engine + jsonPath: .status.currentImage + - name: Age + type: date + jsonPath: .metadata.creationTimestamp +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/name: longhorn + helm.sh/chart: longhorn-1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.1.1 + longhorn-manager: Replica + name: replicas.longhorn.io +spec: + group: longhorn.io + names: + kind: Replica + listKind: ReplicaList + plural: replicas + shortNames: + - 
lhr + singular: replica + scope: Namespaced + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: State + type: string + description: The current state of the replica + jsonPath: .status.currentState + - name: Node + type: string + description: The node that the replica is on + jsonPath: .spec.nodeID + - name: Disk + type: string + description: The disk that the replica is on + jsonPath: .spec.diskID + - name: InstanceManager + type: string + description: The instance manager of the replica + jsonPath: .status.instanceManagerName + - name: Image + type: string + description: The current image of the replica + jsonPath: .status.currentImage + - name: Age + type: date + jsonPath: .metadata.creationTimestamp +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/name: longhorn + helm.sh/chart: longhorn-1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.1.1 + longhorn-manager: Setting + name: settings.longhorn.io +spec: + group: longhorn.io + names: + kind: Setting + listKind: SettingList + plural: settings + shortNames: + - lhs + singular: setting + scope: Namespaced + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + x-kubernetes-preserve-unknown-fields: true + additionalPrinterColumns: + - name: Value + type: string + description: The value of the setting + jsonPath: .value + - name: Age + type: date + jsonPath: .metadata.creationTimestamp +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/name: longhorn + helm.sh/chart: longhorn-1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: 
longhorn + app.kubernetes.io/version: v1.1.1 + longhorn-manager: Volume + name: volumes.longhorn.io +spec: + group: longhorn.io + names: + kind: Volume + listKind: VolumeList + plural: volumes + shortNames: + - lhv + singular: volume + scope: Namespaced + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: State + type: string + description: The state of the volume + jsonPath: .status.state + - name: Robustness + type: string + description: The robustness of the volume + jsonPath: .status.robustness + - name: Scheduled + type: string + description: The scheduled condition of the volume + jsonPath: .status.conditions['scheduled']['status'] + - name: Size + type: string + description: The size of the volume + jsonPath: .spec.size + - name: Node + type: string + description: The node that the volume is currently attaching to + jsonPath: .status.currentNodeID + - name: Age + type: date + jsonPath: .metadata.creationTimestamp +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/name: longhorn + helm.sh/chart: longhorn-1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.1.1 + longhorn-manager: EngineImage + name: engineimages.longhorn.io +spec: + group: longhorn.io + names: + kind: EngineImage + listKind: EngineImageList + plural: engineimages + shortNames: + - lhei + singular: engineimage + scope: Namespaced + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: State + type: 
string + description: State of the engine image + jsonPath: .status.state + - name: Image + type: string + description: The Longhorn engine image + jsonPath: .spec.image + - name: RefCount + type: integer + description: Number of volumes are using the engine image + jsonPath: .status.refCount + - name: BuildDate + type: date + description: The build date of the engine image + jsonPath: .status.buildDate + - name: Age + type: date + jsonPath: .metadata.creationTimestamp +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/name: longhorn + helm.sh/chart: longhorn-1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.1.1 + longhorn-manager: Node + name: nodes.longhorn.io +spec: + group: longhorn.io + names: + kind: Node + listKind: NodeList + plural: nodes + shortNames: + - lhn + singular: node + scope: Namespaced + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: Ready + type: string + description: Indicate whether the node is ready + jsonPath: .status.conditions['Ready']['status'] + - name: AllowScheduling + type: boolean + description: Indicate whether the user disabled/enabled replica scheduling for the node + jsonPath: .spec.allowScheduling + - name: Schedulable + type: string + description: Indicate whether Longhorn can schedule replicas on the node + jsonPath: .status.conditions['Schedulable']['status'] + - name: Age + type: date + jsonPath: .metadata.creationTimestamp +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/name: longhorn + helm.sh/chart: longhorn-1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: longhorn 
+ app.kubernetes.io/version: v1.1.1 + longhorn-manager: InstanceManager + name: instancemanagers.longhorn.io +spec: + group: longhorn.io + names: + kind: InstanceManager + listKind: InstanceManagerList + plural: instancemanagers + shortNames: + - lhim + singular: instancemanager + scope: Namespaced + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: State + type: string + description: The state of the instance manager + jsonPath: .status.currentState + - name: Type + type: string + description: The type of the instance manager (engine or replica) + jsonPath: .spec.type + - name: Node + type: string + description: The node that the instance manager is running on + jsonPath: .spec.nodeID + - name: Age + type: date + jsonPath: .metadata.creationTimestamp +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/name: longhorn + helm.sh/chart: longhorn-1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.1.1 + longhorn-manager: ShareManager + name: sharemanagers.longhorn.io +spec: + group: longhorn.io + names: + kind: ShareManager + listKind: ShareManagerList + plural: sharemanagers + shortNames: + - lhsm + singular: sharemanager + scope: Namespaced + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: State + type: string + description: The state of the share manager + jsonPath: .status.state + - name: Node + type: string + description: The node that the share manager is owned by 
+ jsonPath: .status.ownerID + - name: Age + type: date + jsonPath: .metadata.creationTimestamp +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/name: longhorn + helm.sh/chart: longhorn-1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.1.1 + longhorn-manager: BackingImage + name: backingimages.longhorn.io +spec: + group: longhorn.io + names: + kind: BackingImage + listKind: BackingImageList + plural: backingimages + shortNames: + - lhbi + singular: backingimage + scope: Namespaced + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: Image + type: string + description: The backing image name + jsonPath: .spec.image + - name: Age + type: date + jsonPath: .metadata.creationTimestamp +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/name: longhorn + helm.sh/chart: longhorn-1.1.1 + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/instance: longhorn + app.kubernetes.io/version: v1.1.1 + longhorn-manager: BackingImageManager + name: backingimagemanagers.longhorn.io +spec: + group: longhorn.io + names: + kind: BackingImageManager + listKind: BackingImageManagerList + plural: backingimagemanagers + shortNames: + - lhbim + singular: backingimagemanager + scope: Namespaced + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + x-kubernetes-preserve-unknown-fields: true + status: + x-kubernetes-preserve-unknown-fields: true + subresources: + status: {} + additionalPrinterColumns: + - name: State + type: string + description: The current state of the manager + jsonPath: 
.status.currentState + - name: Image + type: string + description: The image the manager pod will use + jsonPath: .spec.image + - name: Node + type: string + description: The node the manager is on + jsonPath: .spec.nodeID + - name: DiskUUID + type: string + description: The disk the manager is responsible for + jsonPath: .spec.diskUUID + - name: DiskPath + type: string + description: The disk path the manager is using + jsonPath: .spec.diskPath + - name: Age + type: date + jsonPath: .metadata.creationTimestamp diff --git a/charts/longhorn/longhorn/1.1.100/.helmignore b/charts/longhorn/longhorn/1.1.100/.helmignore new file mode 100755 index 000000000..f0c131944 --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/longhorn/longhorn/1.1.100/Chart.yaml b/charts/longhorn/longhorn/1.1.100/Chart.yaml new file mode 100755 index 000000000..157a1bff6 --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/Chart.yaml @@ -0,0 +1,37 @@ +annotations: + catalog.cattle.io/auto-install: longhorn-crd=match + catalog.cattle.io/certified: rancher + catalog.cattle.io/display-name: Longhorn + catalog.cattle.io/namespace: longhorn-system + catalog.cattle.io/os: linux + catalog.cattle.io/provides-gvr: longhorn.io/v1beta1 + catalog.cattle.io/release-name: longhorn + catalog.cattle.io/ui-component: longhorn +apiVersion: v1 +appVersion: v1.1.1 +description: Longhorn is a distributed block storage system for Kubernetes. 
+home: https://github.com/longhorn/longhorn +icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/longhorn/icon/color/longhorn-icon-color.png +keywords: +- longhorn +- storage +- distributed +- block +- device +- iscsi +- nfs +kubeVersion: '>=v1.16.0-r0' +maintainers: +- email: maintainers@longhorn.io + name: Longhorn maintainers +name: longhorn +sources: +- https://github.com/longhorn/longhorn +- https://github.com/longhorn/longhorn-engine +- https://github.com/longhorn/longhorn-instance-manager +- https://github.com/longhorn/longhorn-share-manager +- https://github.com/longhorn/longhorn-manager +- https://github.com/longhorn/longhorn-ui +- https://github.com/longhorn/longhorn-tests +- https://github.com/longhorn/backing-image-manager +version: 1.1.100 diff --git a/charts/longhorn/longhorn/1.1.100/README.md b/charts/longhorn/longhorn/1.1.100/README.md new file mode 100755 index 000000000..e147f8e8f --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/README.md @@ -0,0 +1,32 @@ +# Longhorn Chart + +> **Important**: Please install the Longhorn chart in the `longhorn-system` namespace only. + +> **Warning**: Longhorn doesn't support downgrading from a higher version to a lower version. + +## Source Code + +Longhorn is 100% open source software. Project source code is spread across a number of repos: + +1. Longhorn Engine -- Core controller/replica logic https://github.com/longhorn/longhorn-engine +2. Longhorn Instance Manager -- Controller/replica instance lifecycle management https://github.com/longhorn/longhorn-instance-manager +3. Longhorn Share Manager -- NFS provisioner that exposes Longhorn volumes as ReadWriteMany volumes. https://github.com/longhorn/longhorn-share-manager +4. Longhorn Manager -- Longhorn orchestration, includes CSI driver for Kubernetes https://github.com/longhorn/longhorn-manager +5. Longhorn UI -- Dashboard https://github.com/longhorn/longhorn-ui + +## Prerequisites + +1. 
A container runtime compatible with Kubernetes (Docker v1.13+, containerd v1.3.7+, etc.) +2. Kubernetes v1.16+ +3. Make sure `curl`, `findmnt`, `grep`, `awk` and `blkid` has been installed in all nodes of the Kubernetes cluster. +4. Make sure `open-iscsi` has been installed, and the `iscsid` daemon is running on all nodes of the Kubernetes cluster. For GKE, recommended Ubuntu as guest OS image since it contains `open-iscsi` already. + +## Uninstallation + +To prevent damage to the Kubernetes cluster, we recommend deleting all Kubernetes workloads using Longhorn volumes (PersistentVolume, PersistentVolumeClaim, StorageClass, Deployment, StatefulSet, DaemonSet, etc). + +From Rancher Cluster Explorer UI, navigate to Apps page, delete app `longhorn` then app `longhorn-crd` in Installed Apps tab. + + +--- +Please see [link](https://github.com/longhorn/longhorn) for more information. diff --git a/charts/longhorn/longhorn/1.1.100/app-readme.md b/charts/longhorn/longhorn/1.1.100/app-readme.md new file mode 100755 index 000000000..cb23135ca --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/app-readme.md @@ -0,0 +1,11 @@ +# Longhorn + +Longhorn is a lightweight, reliable and easy to use distributed block storage system for Kubernetes. Once deployed, users can leverage persistent volumes provided by Longhorn. + +Longhorn creates a dedicated storage controller for each volume and synchronously replicates the volume across multiple replicas stored on multiple nodes. The storage controller and replicas are themselves orchestrated using Kubernetes. Longhorn supports snapshots, backups and even allows you to schedule recurring snapshots and backups! + +**Important**: Please install Longhorn chart in `longhorn-system` namespace only. + +**Warning**: Longhorn doesn't support downgrading from a higher version to a lower version. 
+ +[Chart Documentation](https://github.com/longhorn/longhorn/blob/master/chart/README.md) diff --git a/charts/longhorn/longhorn/1.1.100/questions.yml b/charts/longhorn/longhorn/1.1.100/questions.yml new file mode 100755 index 000000000..143b45014 --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/questions.yml @@ -0,0 +1,542 @@ +categories: +- storage +namespace: longhorn-system +questions: +- variable: image.defaultImage + default: "true" + description: "Use default Longhorn images" + label: Use Default Images + type: boolean + show_subquestion_if: false + group: "Longhorn Images" + subquestions: + - variable: image.longhorn.manager.repository + default: rancher/mirrored-longhornio-longhorn-manager + description: "Specify Longhorn Manager Image Repository" + type: string + label: Longhorn Manager Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.manager.tag + default: v1.1.1 + description: "Specify Longhorn Manager Image Tag" + type: string + label: Longhorn Manager Image Tag + group: "Longhorn Images Settings" + - variable: image.longhorn.engine.repository + default: rancher/mirrored-longhornio-longhorn-engine + description: "Specify Longhorn Engine Image Repository" + type: string + label: Longhorn Engine Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.engine.tag + default: v1.1.1 + description: "Specify Longhorn Engine Image Tag" + type: string + label: Longhorn Engine Image Tag + group: "Longhorn Images Settings" + - variable: image.longhorn.ui.repository + default: rancher/mirrored-longhornio-longhorn-ui + description: "Specify Longhorn UI Image Repository" + type: string + label: Longhorn UI Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.ui.tag + default: v1.1.1 + description: "Specify Longhorn UI Image Tag" + type: string + label: Longhorn UI Image Tag + group: "Longhorn Images Settings" + - variable: image.longhorn.instanceManager.repository + default: 
rancher/mirrored-longhornio-longhorn-instance-manager + description: "Specify Longhorn Instance Manager Image Repository" + type: string + label: Longhorn Instance Manager Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.instanceManager.tag + default: v1_20201216 + description: "Specify Longhorn Instance Manager Image Tag" + type: string + label: Longhorn Instance Manager Image Tag + group: "Longhorn Images Settings" + - variable: image.longhorn.shareManager.repository + default: rancher/mirrored-longhornio-longhorn-share-manager + description: "Specify Longhorn Share Manager Image Repository" + type: string + label: Longhorn Share Manager Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.shareManager.tag + default: v1_20210416 + description: "Specify Longhorn Share Manager Image Tag" + type: string + label: Longhorn Share Manager Image Tag + group: "Longhorn Images Settings" + - variable: image.longhorn.backingImageManager.repository + default: rancher/mirrored-longhornio-backing-image-manager + description: "Specify Longhorn Backing Image Manager Image Repository" + type: string + label: Longhorn Backing Image Manager Image Repository + group: "Longhorn Images Settings" + - variable: image.longhorn.backingImageManager.tag + default: v1_20210422 + description: "Specify Longhorn Backing Image Manager Image Tag" + type: string + label: Longhorn Backing Image Manager Image Tag + group: "Longhorn Images Settings" + - variable: image.csi.attacher.repository + default: rancher/mirrored-longhornio-csi-attacher + description: "Specify CSI attacher image repository. Leave blank to autodetect." + type: string + label: Longhorn CSI Attacher Image Repository + group: "Longhorn CSI Driver Images" + - variable: image.csi.attacher.tag + default: v2.2.1-lh1 + description: "Specify CSI attacher image tag. Leave blank to autodetect." 
+ type: string + label: Longhorn CSI Attacher Image Tag + group: "Longhorn CSI Driver Images" + - variable: image.csi.provisioner.repository + default: rancher/mirrored-longhornio-csi-provisioner + description: "Specify CSI provisioner image repository. Leave blank to autodetect." + type: string + label: Longhorn CSI Provisioner Image Repository + group: "Longhorn CSI Driver Images" + - variable: image.csi.provisioner.tag + default: v1.6.0-lh1 + description: "Specify CSI provisioner image tag. Leave blank to autodetect." + type: string + label: Longhorn CSI Provisioner Image Tag + group: "Longhorn CSI Driver Images" + - variable: image.csi.nodeDriverRegistrar.repository + default: rancher/mirrored-longhornio-csi-node-driver-registrar + description: "Specify CSI Node Driver Registrar image repository. Leave blank to autodetect." + type: string + label: Longhorn CSI Node Driver Registrar Image Repository + group: "Longhorn CSI Driver Images" + - variable: image.csi.nodeDriverRegistrar.tag + default: v1.2.0-lh1 + description: "Specify CSI Node Driver Registrar image tag. Leave blank to autodetect." + type: string + label: Longhorn CSI Node Driver Registrar Image Tag + group: "Longhorn CSI Driver Images" + - variable: image.csi.resizer.repository + default: rancher/mirrored-longhornio-csi-resizer + description: "Specify CSI Driver Resizer image repository. Leave blank to autodetect." + type: string + label: Longhorn CSI Driver Resizer Image Repository + group: "Longhorn CSI Driver Images" + - variable: image.csi.resizer.tag + default: v0.5.1-lh1 + description: "Specify CSI Driver Resizer image tag. Leave blank to autodetect." + type: string + label: Longhorn CSI Driver Resizer Image Tag + group: "Longhorn CSI Driver Images" + - variable: image.csi.snapshotter.repository + default: rancher/mirrored-longhornio-csi-snapshotter + description: "Specify CSI Driver Snapshotter image repository. Leave blank to autodetect." 
+ type: string + label: Longhorn CSI Driver Snapshotter Image Repository + group: "Longhorn CSI Driver Images" + - variable: image.csi.snapshotter.tag + default: v2.1.1-lh1 + description: "Specify CSI Driver Snapshotter image tag. Leave blank to autodetect." + type: string + label: Longhorn CSI Driver Snapshotter Image Tag + group: "Longhorn CSI Driver Images" +- variable: privateRegistry.registryUrl + label: Private registry URL + description: "URL of private registry. Leave blank to apply system default registry." + group: "Private Registry Settings" + type: string + default: "" +- variable: privateRegistry.registryUser + label: Private registry user + description: "User used to authenticate to private registry" + group: "Private Registry Settings" + type: string + default: "" +- variable: privateRegistry.registryPasswd + label: Private registry password + description: "Password used to authenticate to private registry" + group: "Private Registry Settings" + type: password + default: "" +- variable: privateRegistry.registrySecret + label: Private registry secret name + description: "Longhorn will automatically generate a Kubernetes secret with this name and use it to pull images from your private registry." + group: "Private Registry Settings" + type: string + default: "" +- variable: longhorn.default_setting + default: "false" + description: "Customize the default settings before installing Longhorn for the first time. This option will only work if the cluster hasn't installed Longhorn." + label: "Customize Default Settings" + type: boolean + show_subquestion_if: true + group: "Longhorn Default Settings" + subquestions: + - variable: csi.kubeletRootDir + default: + description: "Specify kubelet root-dir. Leave blank to autodetect." + type: string + label: Kubelet Root Directory + group: "Longhorn CSI Driver Settings" + - variable: csi.attacherReplicaCount + type: int + default: 3 + min: 1 + max: 10 + description: "Specify replica count of CSI Attacher. 
By default 3." + label: Longhorn CSI Attacher replica count + group: "Longhorn CSI Driver Settings" + - variable: csi.provisionerReplicaCount + type: int + default: 3 + min: 1 + max: 10 + description: "Specify replica count of CSI Provisioner. By default 3." + label: Longhorn CSI Provisioner replica count + group: "Longhorn CSI Driver Settings" + - variable: csi.resizerReplicaCount + type: int + default: 3 + min: 1 + max: 10 + description: "Specify replica count of CSI Resizer. By default 3." + label: Longhorn CSI Resizer replica count + group: "Longhorn CSI Driver Settings" + - variable: csi.snapshotterReplicaCount + type: int + default: 3 + min: 1 + max: 10 + description: "Specify replica count of CSI Snapshotter. By default 3." + label: Longhorn CSI Snapshotter replica count + group: "Longhorn CSI Driver Settings" + - variable: defaultSettings.backupTarget + label: Backup Target + description: "The endpoint used to access the backupstore. NFS and S3 are supported." + group: "Longhorn Default Settings" + type: string + default: + - variable: defaultSettings.backupTargetCredentialSecret + label: Backup Target Credential Secret + description: "The name of the Kubernetes secret associated with the backup target." + group: "Longhorn Default Settings" + type: string + default: + - variable: defaultSettings.allowRecurringJobWhileVolumeDetached + label: Allow Recurring Job While Volume Is Detached + description: 'If this setting is enabled, Longhorn will automatically attaches the volume and takes snapshot/backup when it is the time to do recurring snapshot/backup. +Note that the volume is not ready for workload during the period when the volume was automatically attached. Workload will have to wait until the recurring job finishes.' 
+ group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.createDefaultDiskLabeledNodes + label: Create Default Disk on Labeled Nodes + description: 'Create default Disk automatically only on Nodes with the label "node.longhorn.io/create-default-disk=true" if no other disks exist. If disabled, the default disk will be created on all new nodes when each node is first added.' + group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.defaultDataPath + label: Default Data Path + description: 'Default path to use for storing data on a host. By default "/var/lib/longhorn/"' + group: "Longhorn Default Settings" + type: string + default: "/var/lib/longhorn/" + - variable: defaultSettings.defaultDataLocality + label: Default Data Locality + description: 'We say a Longhorn volume has data locality if there is a local replica of the volume on the same node as the pod which is using the volume. +This setting specifies the default data locality when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `dataLocality` in the StorageClass +The available modes are: +- **disabled**. This is the default option. There may or may not be a replica on the same node as the attached volume (workload) +- **best-effort**. This option instructs Longhorn to try to keep a replica on the same node as the attached volume (workload). Longhorn will not stop the volume, even if it cannot keep a replica local to the attached volume (workload) due to environment limitation, e.g. not enough disk space, incompatible disk tags, etc.' + group: "Longhorn Default Settings" + type: enum + options: + - "disabled" + - "best-effort" + default: "disabled" + - variable: defaultSettings.replicaSoftAntiAffinity + label: Replica Node Level Soft Anti-Affinity + description: 'Allow scheduling on nodes with existing healthy replicas of the same volume. By default false.' 
+ group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.storageOverProvisioningPercentage + label: Storage Over Provisioning Percentage + description: "The over-provisioning percentage defines how much storage can be allocated relative to the hard drive's capacity. By default 200." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 200 + - variable: defaultSettings.storageMinimalAvailablePercentage + label: Storage Minimal Available Percentage + description: "If the minimum available disk capacity exceeds the actual percentage of available disk capacity, the disk becomes unschedulable until more space is freed up. By default 25." + group: "Longhorn Default Settings" + type: int + min: 0 + max: 100 + default: 25 + - variable: defaultSettings.upgradeChecker + label: Enable Upgrade Checker + description: 'Upgrade Checker will check for new Longhorn version periodically. When there is a new version available, a notification will appear in the UI. By default true.' + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.defaultReplicaCount + label: Default Replica Count + description: "The default number of replicas when a volume is created from the Longhorn UI. For Kubernetes configuration, update the `numberOfReplicas` in the StorageClass. By default 3." + group: "Longhorn Default Settings" + type: int + min: 1 + max: 20 + default: 3 + - variable: defaultSettings.defaultLonghornStaticStorageClass + label: Default Longhorn Static StorageClass Name + description: "The 'storageClassName' is given to PVs and PVCs that are created for an existing Longhorn volume. The StorageClass name can also be used as a label, so it is possible to use a Longhorn StorageClass to bind a workload to an existing PV without creating a Kubernetes StorageClass object. By default 'longhorn-static'." 
+ group: "Longhorn Default Settings" + type: string + default: "longhorn-static" + - variable: defaultSettings.backupstorePollInterval + label: Backupstore Poll Interval + description: "In seconds. The backupstore poll interval determines how often Longhorn checks the backupstore for new backups. Set to 0 to disable the polling. By default 300." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 300 + - variable: defaultSettings.autoSalvage + label: Automatic salvage + description: "If enabled, volumes will be automatically salvaged when all the replicas become faulty e.g. due to network disconnection. Longhorn will try to figure out which replica(s) are usable, then use them for the volume. By default true." + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly + label: Automatically Delete Workload Pod when The Volume Is Detached Unexpectedly + description: 'If enabled, Longhorn will automatically delete the workload pod that is managed by a controller (e.g. deployment, statefulset, daemonset, etc...) when Longhorn volume is detached unexpectedly (e.g. during Kubernetes upgrade, Docker reboot, or network disconnect). By deleting the pod, its controller restarts the pod and Kubernetes handles volume reattachment and remount. +If disabled, Longhorn will not delete the workload pod that is managed by a controller. You will have to manually restart the pod to reattach and remount the volume. +**Note:** This setting does not apply to the workload pods that do not have a controller. Longhorn never deletes them.' + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.disableSchedulingOnCordonedNode + label: Disable Scheduling On Cordoned Node + description: "Disable Longhorn manager to schedule replica on Kubernetes cordoned node. By default true." 
+ group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.replicaZoneSoftAntiAffinity + label: Replica Zone Level Soft Anti-Affinity + description: "Allow scheduling new Replicas of Volume to the Nodes in the same Zone as existing healthy Replicas. Nodes don't belong to any Zone will be treated as in the same Zone. By default true." + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.volumeAttachmentRecoveryPolicy + label: Volume Attachment Recovery Policy + description: "Defines the Longhorn action when a Volume is stuck with a Deployment Pod on a failed node. `wait` leads to the deletion of the volume attachment as soon as the pods deletion time has passed. `never` is the default Kubernetes behavior of never deleting volume attachments on terminating pods. `immediate` leads to the deletion of the volume attachment as soon as all workload pods are pending. By default wait." + group: "Longhorn Default Settings" + type: enum + options: + - "wait" + - "never" + - "immediate" + default: "wait" + - variable: defaultSettings.nodeDownPodDeletionPolicy + label: Pod Deletion Policy When Node is Down + description: "Defines the Longhorn action when a Volume is stuck with a StatefulSet/Deployment Pod on a node that is down. +- **do-nothing** is the default Kubernetes behavior of never force deleting StatefulSet/Deployment terminating pods. Since the pod on the node that is down isn't removed, Longhorn volumes are stuck on nodes that are down. +- **delete-statefulset-pod** Longhorn will force delete StatefulSet terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods. +- **delete-deployment-pod** Longhorn will force delete Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods. 
+- **delete-both-statefulset-and-deployment-pod** Longhorn will force delete StatefulSet/Deployment terminating pods on nodes that are down to release Longhorn volumes so that Kubernetes can spin up replacement pods." + group: "Longhorn Default Settings" + type: enum + options: + - "do-nothing" + - "delete-statefulset-pod" + - "delete-deployment-pod" + - "delete-both-statefulset-and-deployment-pod" + default: "do-nothing" + - variable: defaultSettings.allowNodeDrainWithLastHealthyReplica + label: Allow Node Drain with the Last Healthy Replica + description: "By default, Longhorn will block `kubectl drain` action on a node if the node contains the last healthy replica of a volume. +If this setting is enabled, Longhorn will **not** block `kubectl drain` action on a node even if the node contains the last healthy replica of a volume." + group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.mkfsExt4Parameters + label: Custom mkfs.ext4 parameters + description: "Allows setting additional filesystem creation parameters for ext4. For older host kernels it might be necessary to disable the optional ext4 metadata_csum feature by specifying `-O ^64bit,^metadata_csum`." + group: "Longhorn Default Settings" + type: string + - variable: defaultSettings.disableReplicaRebuild + label: Disable Replica Rebuild + description: "This setting disable replica rebuild cross the whole cluster, eviction and data locality feature won't work if this setting is true. But doesn't have any impact to any current replica rebuild and restore disaster recovery volume." + group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.replicaReplenishmentWaitInterval + label: Replica Replenishment Wait Interval + description: "In seconds. The interval determines how long Longhorn will wait at least in order to reuse the existing data on a failed replica rather than directly creating a new replica for a degraded volume. 
+Warning: This option works only when there is a failed replica in the volume. And this option may block the rebuilding for a while in the case." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 600 + - variable: defaultSettings.disableRevisionCounter + label: Disable Revision Counter + description: "This setting is only for volumes created by UI. By default, this is false meaning there will be a reivision counter file to track every write to the volume. During salvage recovering Longhorn will pick the repica with largest reivision counter as candidate to recover the whole volume. If revision counter is disabled, Longhorn will not track every write to the volume. During the salvage recovering, Longhorn will use the 'volume-head-xxx.img' file last modification time and file size to pick the replica candidate to recover the whole volume." + group: "Longhorn Default Settings" + type: boolean + default: "false" + - variable: defaultSettings.systemManagedPodsImagePullPolicy + label: System Managed Pod Image Pull Policy + description: "This setting defines the Image Pull Policy of Longhorn system managed pods, e.g. instance manager, engine image, CSI driver, etc. The new Image Pull Policy will only apply after the system managed pods restart." + group: "Longhorn Default Settings" + type: enum + options: + - "if-not-present" + - "always" + - "never" + default: "if-not-present" + - variable: defaultSettings.allowVolumeCreationWithDegradedAvailability + label: Allow Volume Creation with Degraded Availability + description: "This setting allows user to create and attach a volume that doesn't have all the replicas scheduled at the time of creation." 
+ group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.autoCleanupSystemGeneratedSnapshot + label: Automatically Cleanup System Generated Snapshot + description: "This setting enables Longhorn to automatically cleanup the system generated snapshot after replica rebuild is done." + group: "Longhorn Default Settings" + type: boolean + default: "true" + - variable: defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit + label: Concurrent Automatic Engine Upgrade Per Node Limit + description: "This setting controls how Longhorn automatically upgrades volumes' engines to the new default engine image after upgrading Longhorn manager. The value of this setting specifies the maximum number of engines per node that are allowed to upgrade to the default engine image at the same time. If the value is 0, Longhorn will not automatically upgrade volumes' engines to default version." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 0 + - variable: defaultSettings.backingImageCleanupWaitInterval + label: Backing Image Cleanup Wait Interval + description: "This interval in minutes determines how long Longhorn will wait before cleaning up the backing image file when there is no replica in the disk using it." + group: "Longhorn Default Settings" + type: int + min: 0 + default: 60 + - variable: defaultSettings.guaranteedEngineManagerCPU + label: Guaranteed Engine Manager CPU + description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each engine manager Pod. For example, 10 means 10% of the total CPU on a node will be allocated to each engine manager pod on this node. This will help maintain engine stability during high node workload. 
+ In order to prevent unexpected volume engine crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting: + Guaranteed Engine Manager CPU = The estimated max Longhorn volume engine count on a node * 0.1 / The total allocatable CPUs on the node * 100. + The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting. + If it's hard to estimate the usage now, you can leave it with the default value, which is 12%. Then you can tune it when there is no running workload using Longhorn volumes. + WARNING: + - Value 0 means unsetting CPU requests for engine manager pods. + - Considering the possible new instance manager pods in the further system upgrade, this integer value is range from 0 to 40. And the sum with setting 'Guaranteed Engine Manager CPU' should not be greater than 40. + - One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then. + - This global setting will be ignored for a node if the field \"EngineManagerCPURequest\" on the node is set. + - After this setting is changed, all engine manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES." 
+ group: "Longhorn Default Settings" + type: int + min: 0 + max: 40 + default: 12 + - variable: defaultSettings.guaranteedReplicaManagerCPU + label: Guaranteed Replica Manager CPU + description: "This integer value indicates how many percentage of the total allocatable CPU on each node will be reserved for each replica manager Pod. 10 means 10% of the total CPU on a node will be allocated to each replica manager pod on this node. This will help maintain replica stability during high node workload. + In order to prevent unexpected volume replica crash as well as guarantee a relative acceptable IO performance, you can use the following formula to calculate a value for this setting: + Guaranteed Replica Manager CPU = The estimated max Longhorn volume replica count on a node * 0.1 / The total allocatable CPUs on the node * 100. + The result of above calculation doesn't mean that's the maximum CPU resources the Longhorn workloads require. To fully exploit the Longhorn volume I/O performance, you can allocate/guarantee more CPU resources via this setting. + If it's hard to estimate the usage now, you can leave it with the default value, which is 12%. Then you can tune it when there is no running workload using Longhorn volumes. + WARNING: + - Value 0 means unsetting CPU requests for replica manager pods. + - Considering the possible new instance manager pods in the further system upgrade, this integer value is range from 0 to 40. And the sum with setting 'Guaranteed Replica Manager CPU' should not be greater than 40. + - One more set of instance manager pods may need to be deployed when the Longhorn system is upgraded. If current available CPUs of the nodes are not enough for the new instance manager pods, you need to detach the volumes using the oldest instance manager pods so that Longhorn can clean up the old pods automatically and release the CPU resources. And the new pods with the latest instance manager image will be launched then. 
+ - This global setting will be ignored for a node if the field \"ReplicaManagerCPURequest\" on the node is set. + - After this setting is changed, all replica manager pods using this global setting on all the nodes will be automatically restarted. In other words, DO NOT CHANGE THIS SETTING WITH ATTACHED VOLUMES." + group: "Longhorn Default Settings" + type: int + min: 0 + max: 40 + default: 12 +- variable: persistence.defaultClass + default: "true" + description: "Set as default StorageClass for Longhorn" + label: Default Storage Class + group: "Longhorn Storage Class Settings" + required: true + type: boolean +- variable: persistence.reclaimPolicy + label: Storage Class Retain Policy + description: "Define reclaim policy (Retain or Delete)" + group: "Longhorn Storage Class Settings" + required: true + type: enum + options: + - "Delete" + - "Retain" + default: "Delete" +- variable: persistence.defaultClassReplicaCount + description: "Set replica count for Longhorn StorageClass" + label: Default Storage Class Replica Count + group: "Longhorn Storage Class Settings" + type: int + min: 1 + max: 10 + default: 3 +- variable: persistence.recurringJobs.enable + description: "Enable recurring job for Longhorn StorageClass" + group: "Longhorn Storage Class Settings" + label: Enable Storage Class Recurring Job + type: boolean + default: false + show_subquestion_if: true + subquestions: + - variable: persistence.recurringJobs.jobList + description: 'Recurring job list for Longhorn StorageClass. Please be careful of quotes of input. 
e.g., [{"name":"backup", "task":"backup", "cron":"*/2 * * * *", "retain":1,"labels": {"interval":"2m"}}]' + label: Storage Class Recurring Job List + group: "Longhorn Storage Class Settings" + type: string + default: +- variable: ingress.enabled + default: "false" + description: "Expose app using Layer 7 Load Balancer - ingress" + type: boolean + group: "Services and Load Balancing" + label: Expose app using Layer 7 Load Balancer + show_subquestion_if: true + subquestions: + - variable: ingress.host + default: "xip.io" + description: "layer 7 Load Balancer hostname" + type: hostname + required: true + label: Layer 7 Load Balancer Hostname +- variable: service.ui.type + default: "Rancher-Proxy" + description: "Define Longhorn UI service type" + type: enum + options: + - "ClusterIP" + - "NodePort" + - "LoadBalancer" + - "Rancher-Proxy" + label: Longhorn UI Service + show_if: "ingress.enabled=false" + group: "Services and Load Balancing" + show_subquestion_if: "NodePort" + subquestions: + - variable: service.ui.nodePort + default: "" + description: "NodePort port number(to set explicitly, choose port between 30000-32767)" + type: int + min: 30000 + max: 32767 + show_if: "service.ui.type=NodePort||service.ui.type=LoadBalancer" + label: UI Service NodePort number +- variable: enablePSP + default: "true" + description: "Setup a pod security policy for Longhorn workloads." + label: Pod Security Policy + type: boolean + group: "Other Settings" diff --git a/charts/longhorn/longhorn/1.1.100/templates/NOTES.txt b/charts/longhorn/longhorn/1.1.100/templates/NOTES.txt new file mode 100755 index 000000000..cca7cd77b --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/templates/NOTES.txt @@ -0,0 +1,5 @@ +Longhorn is now installed on the cluster! + +Please wait a few minutes for other Longhorn components such as CSI deployments, Engine Images, and Instance Managers to be initialized. 
+ +Visit our documentation at https://longhorn.io/docs/ diff --git a/charts/longhorn/longhorn/1.1.100/templates/_helpers.tpl b/charts/longhorn/longhorn/1.1.100/templates/_helpers.tpl new file mode 100755 index 000000000..3fbc2ac02 --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/templates/_helpers.tpl @@ -0,0 +1,66 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "longhorn.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "longhorn.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + + +{{- define "longhorn.managerIP" -}} +{{- $fullname := (include "longhorn.fullname" .) -}} +{{- printf "http://%s-backend:9500" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} + + +{{- define "secret" }} +{{- printf "{\"auths\": {\"%s\": {\"auth\": \"%s\"}}}" .Values.privateRegistry.registryUrl (printf "%s:%s" .Values.privateRegistry.registryUser .Values.privateRegistry.registryPasswd | b64enc) | b64enc }} +{{- end }} + +{{- /* +longhorn.labels generates the standard Helm labels. +*/ -}} +{{- define "longhorn.labels" -}} +app.kubernetes.io/name: {{ template "longhorn.name" . 
}} +helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/version: {{ .Chart.AppVersion }} +{{- end -}} + + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} + +{{- define "registry_url" -}} +{{- if .Values.privateRegistry.registryUrl -}} +{{- printf "%s/" .Values.privateRegistry.registryUrl -}} +{{- else -}} +{{ include "system_default_registry" . }} +{{- end -}} +{{- end -}} + +{{- /* + define the longhorn release namespace +*/ -}} +{{- define "release_namespace" -}} +{{- if .Values.namespaceOverride -}} +{{- .Values.namespaceOverride -}} +{{- else -}} +{{- .Release.Namespace -}} +{{- end -}} +{{- end -}} diff --git a/charts/longhorn/longhorn/1.1.100/templates/clusterrole.yaml b/charts/longhorn/longhorn/1.1.100/templates/clusterrole.yaml new file mode 100755 index 000000000..cd5aafb50 --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/templates/clusterrole.yaml @@ -0,0 +1,47 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: longhorn-role + labels: {{- include "longhorn.labels" . 
| nindent 4 }} +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - "*" +- apiGroups: [""] + resources: ["pods", "events", "persistentvolumes", "persistentvolumeclaims","persistentvolumeclaims/status", "nodes", "proxy/nodes", "pods/log", "secrets", "services", "endpoints", "configmaps"] + verbs: ["*"] +- apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list"] +- apiGroups: ["apps"] + resources: ["daemonsets", "statefulsets", "deployments"] + verbs: ["*"] +- apiGroups: ["batch"] + resources: ["jobs", "cronjobs"] + verbs: ["*"] +- apiGroups: ["policy"] + resources: ["poddisruptionbudgets"] + verbs: ["*"] +- apiGroups: ["scheduling.k8s.io"] + resources: ["priorityclasses"] + verbs: ["watch", "list"] +- apiGroups: ["storage.k8s.io"] + resources: ["storageclasses", "volumeattachments", "csinodes", "csidrivers"] + verbs: ["*"] +- apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses", "volumesnapshots", "volumesnapshotcontents", "volumesnapshotcontents/status"] + verbs: ["*"] +- apiGroups: ["longhorn.io"] + resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings", + "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status", + "sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status", "backingimagemanagers", "backingimagemanagers/status"] + verbs: ["*"] +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["*"] +- apiGroups: ["metrics.k8s.io"] + resources: ["pods", "nodes"] + verbs: ["get", "list"] diff --git a/charts/longhorn/longhorn/1.1.100/templates/clusterrolebinding.yaml b/charts/longhorn/longhorn/1.1.100/templates/clusterrolebinding.yaml new file mode 100755 index 000000000..66ac62f9b --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/templates/clusterrolebinding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: 
ClusterRoleBinding +metadata: + name: longhorn-bind + labels: {{- include "longhorn.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: longhorn-role +subjects: +- kind: ServiceAccount + name: longhorn-service-account + namespace: {{ include "release_namespace" . }} diff --git a/charts/longhorn/longhorn/1.1.100/templates/daemonset-sa.yaml b/charts/longhorn/longhorn/1.1.100/templates/daemonset-sa.yaml new file mode 100755 index 000000000..636a4c0c2 --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/templates/daemonset-sa.yaml @@ -0,0 +1,125 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + app: longhorn-manager + name: longhorn-manager + namespace: {{ include "release_namespace" . }} +spec: + selector: + matchLabels: + app: longhorn-manager + template: + metadata: + labels: {{- include "longhorn.labels" . | nindent 8 }} + app: longhorn-manager + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + containers: + - name: longhorn-manager + image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + command: + - longhorn-manager + - -d + - daemon + - --engine-image + - "{{ template "registry_url" . }}{{ .Values.image.longhorn.engine.repository }}:{{ .Values.image.longhorn.engine.tag }}" + - --instance-manager-image + - "{{ template "registry_url" . }}{{ .Values.image.longhorn.instanceManager.repository }}:{{ .Values.image.longhorn.instanceManager.tag }}" + - --share-manager-image + - "{{ template "registry_url" . }}{{ .Values.image.longhorn.shareManager.repository }}:{{ .Values.image.longhorn.shareManager.tag }}" + - --backing-image-manager-image + - "{{ template "registry_url" . 
}}{{ .Values.image.longhorn.backingImageManager.repository }}:{{ .Values.image.longhorn.backingImageManager.tag }}" + - --manager-image + - "{{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}" + - --service-account + - longhorn-service-account + ports: + - containerPort: 9500 + name: manager + readinessProbe: + tcpSocket: + port: 9500 + volumeMounts: + - name: dev + mountPath: /host/dev/ + - name: proc + mountPath: /host/proc/ + - name: longhorn + mountPath: /var/lib/longhorn/ + mountPropagation: Bidirectional + - name: longhorn-default-setting + mountPath: /var/lib/longhorn-setting/ + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: DEFAULT_SETTING_PATH + value: /var/lib/longhorn-setting/default-setting.yaml + volumes: + - name: dev + hostPath: + path: /dev/ + - name: proc + hostPath: + path: /proc/ + - name: longhorn + hostPath: + path: /var/lib/longhorn/ + - name: longhorn-default-setting + configMap: + name: longhorn-default-setting + {{- if .Values.privateRegistry.registrySecret }} + imagePullSecrets: + - name: {{ .Values.privateRegistry.registrySecret }} + {{- end }} + {{- if .Values.longhornManager.priorityClass }} + priorityClassName: {{ .Values.longhornManager.priorityClass | quote}} + {{- end }} + serviceAccountName: longhorn-service-account + {{- if .Values.longhornManager.tolerations }} + tolerations: +{{ toYaml .Values.longhornManager.tolerations | indent 6 }} + {{- end }} + {{- if .Values.longhornManager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }} + {{- end }} + updateStrategy: + rollingUpdate: + maxUnavailable: "100%" +--- +apiVersion: v1 +kind: Service +metadata: + labels: {{- include "longhorn.labels" . 
| nindent 4 }} + app: longhorn-manager + name: longhorn-backend + namespace: {{ include "release_namespace" . }} +spec: + type: {{ .Values.service.manager.type }} + sessionAffinity: ClientIP + selector: + app: longhorn-manager + ports: + - name: manager + port: 9500 + targetPort: manager + {{- if .Values.service.manager.nodePort }} + nodePort: {{ .Values.service.manager.nodePort }} + {{- end }} diff --git a/charts/longhorn/longhorn/1.1.100/templates/default-setting.yaml b/charts/longhorn/longhorn/1.1.100/templates/default-setting.yaml new file mode 100755 index 000000000..e1f0c7ae1 --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/templates/default-setting.yaml @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: longhorn-default-setting + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} +data: + default-setting.yaml: |- + backup-target: {{ .Values.defaultSettings.backupTarget }} + backup-target-credential-secret: {{ .Values.defaultSettings.backupTargetCredentialSecret }} + allow-recurring-job-while-volume-detached: {{ .Values.defaultSettings.allowRecurringJobWhileVolumeDetached }} + create-default-disk-labeled-nodes: {{ .Values.defaultSettings.createDefaultDiskLabeledNodes }} + default-data-path: {{ .Values.defaultSettings.defaultDataPath }} + replica-soft-anti-affinity: {{ .Values.defaultSettings.replicaSoftAntiAffinity }} + storage-over-provisioning-percentage: {{ .Values.defaultSettings.storageOverProvisioningPercentage }} + storage-minimal-available-percentage: {{ .Values.defaultSettings.storageMinimalAvailablePercentage }} + upgrade-checker: {{ .Values.defaultSettings.upgradeChecker }} + default-replica-count: {{ .Values.defaultSettings.defaultReplicaCount }} + default-data-locality: {{ .Values.defaultSettings.defaultDataLocality }} + guaranteed-engine-cpu: {{ .Values.defaultSettings.guaranteedEngineCPU }} + default-longhorn-static-storage-class: {{ 
.Values.defaultSettings.defaultLonghornStaticStorageClass }} + backupstore-poll-interval: {{ .Values.defaultSettings.backupstorePollInterval }} + taint-toleration: {{ .Values.defaultSettings.taintToleration }} + system-managed-components-node-selector: {{ .Values.defaultSettings.systemManagedComponentsNodeSelector }} + priority-class: {{ .Values.defaultSettings.priorityClass }} + auto-salvage: {{ .Values.defaultSettings.autoSalvage }} + auto-delete-pod-when-volume-detached-unexpectedly: {{ .Values.defaultSettings.autoDeletePodWhenVolumeDetachedUnexpectedly }} + disable-scheduling-on-cordoned-node: {{ .Values.defaultSettings.disableSchedulingOnCordonedNode }} + replica-zone-soft-anti-affinity: {{ .Values.defaultSettings.replicaZoneSoftAntiAffinity }} + volume-attachment-recovery-policy: {{ .Values.defaultSettings.volumeAttachmentRecoveryPolicy }} + node-down-pod-deletion-policy: {{ .Values.defaultSettings.nodeDownPodDeletionPolicy }} + allow-node-drain-with-last-healthy-replica: {{ .Values.defaultSettings.allowNodeDrainWithLastHealthyReplica }} + mkfs-ext4-parameters: {{ .Values.defaultSettings.mkfsExt4Parameters }} + disable-replica-rebuild: {{ .Values.defaultSettings.disableReplicaRebuild }} + replica-replenishment-wait-interval: {{ .Values.defaultSettings.replicaReplenishmentWaitInterval }} + disable-revision-counter: {{ .Values.defaultSettings.disableRevisionCounter }} + system-managed-pods-image-pull-policy: {{ .Values.defaultSettings.systemManagedPodsImagePullPolicy }} + allow-volume-creation-with-degraded-availability: {{ .Values.defaultSettings.allowVolumeCreationWithDegradedAvailability }} + auto-cleanup-system-generated-snapshot: {{ .Values.defaultSettings.autoCleanupSystemGeneratedSnapshot }} + concurrent-automatic-engine-upgrade-per-node-limit: {{ .Values.defaultSettings.concurrentAutomaticEngineUpgradePerNodeLimit }} + backing-image-cleanup-wait-interval: {{ .Values.defaultSettings.backingImageCleanupWaitInterval }} + guaranteed-engine-manager-cpu: {{ 
.Values.defaultSettings.guaranteedEngineManagerCPU }} + guaranteed-replica-manager-cpu: {{ .Values.defaultSettings.guaranteedReplicaManagerCPU }} diff --git a/charts/longhorn/longhorn/1.1.100/templates/deployment-driver.yaml b/charts/longhorn/longhorn/1.1.100/templates/deployment-driver.yaml new file mode 100755 index 000000000..fb0390a6b --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/templates/deployment-driver.yaml @@ -0,0 +1,104 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: longhorn-driver-deployer + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + app: longhorn-driver-deployer + template: + metadata: + labels: {{- include "longhorn.labels" . | nindent 8 }} + app: longhorn-driver-deployer + spec: + initContainers: + - name: wait-longhorn-manager + image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} + command: ['sh', '-c', 'while [ $(curl -m 1 -s -o /dev/null -w "%{http_code}" http://longhorn-backend:9500/v1) != "200" ]; do echo waiting; sleep 2; done'] + containers: + - name: longhorn-driver-deployer + image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} + imagePullPolicy: IfNotPresent + command: + - longhorn-manager + - -d + - deploy-driver + - --manager-image + - "{{ template "registry_url" . 
}}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }}" + - --manager-url + - http://longhorn-backend:9500/v1 + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: SERVICE_ACCOUNT + valueFrom: + fieldRef: + fieldPath: spec.serviceAccountName + {{- if .Values.csi.kubeletRootDir }} + - name: KUBELET_ROOT_DIR + value: {{ .Values.csi.kubeletRootDir }} + {{- end }} + {{- if and .Values.image.csi.attacher.repository .Values.image.csi.attacher.tag }} + - name: CSI_ATTACHER_IMAGE + value: "{{ template "registry_url" . }}{{ .Values.image.csi.attacher.repository }}:{{ .Values.image.csi.attacher.tag }}" + {{- end }} + {{- if and .Values.image.csi.provisioner.repository .Values.image.csi.provisioner.tag }} + - name: CSI_PROVISIONER_IMAGE + value: "{{ template "registry_url" . }}{{ .Values.image.csi.provisioner.repository }}:{{ .Values.image.csi.provisioner.tag }}" + {{- end }} + {{- if and .Values.image.csi.nodeDriverRegistrar.repository .Values.image.csi.nodeDriverRegistrar.tag }} + - name: CSI_NODE_DRIVER_REGISTRAR_IMAGE + value: "{{ template "registry_url" . }}{{ .Values.image.csi.nodeDriverRegistrar.repository }}:{{ .Values.image.csi.nodeDriverRegistrar.tag }}" + {{- end }} + {{- if and .Values.image.csi.resizer.repository .Values.image.csi.resizer.tag }} + - name: CSI_RESIZER_IMAGE + value: "{{ template "registry_url" . }}{{ .Values.image.csi.resizer.repository }}:{{ .Values.image.csi.resizer.tag }}" + {{- end }} + {{- if and .Values.image.csi.snapshotter.repository .Values.image.csi.snapshotter.tag }} + - name: CSI_SNAPSHOTTER_IMAGE + value: "{{ template "registry_url" . 
}}{{ .Values.image.csi.snapshotter.repository }}:{{ .Values.image.csi.snapshotter.tag }}" + {{- end }} + {{- if .Values.csi.attacherReplicaCount }} + - name: CSI_ATTACHER_REPLICA_COUNT + value: {{ .Values.csi.attacherReplicaCount | quote }} + {{- end }} + {{- if .Values.csi.provisionerReplicaCount }} + - name: CSI_PROVISIONER_REPLICA_COUNT + value: {{ .Values.csi.provisionerReplicaCount | quote }} + {{- end }} + {{- if .Values.csi.resizerReplicaCount }} + - name: CSI_RESIZER_REPLICA_COUNT + value: {{ .Values.csi.resizerReplicaCount | quote }} + {{- end }} + {{- if .Values.csi.snapshotterReplicaCount }} + - name: CSI_SNAPSHOTTER_REPLICA_COUNT + value: {{ .Values.csi.snapshotterReplicaCount | quote }} + {{- end }} + + {{- if .Values.privateRegistry.registrySecret }} + imagePullSecrets: + - name: {{ .Values.privateRegistry.registrySecret }} + {{- end }} + {{- if .Values.longhornDriver.priorityClass }} + priorityClassName: {{ .Values.longhornDriver.priorityClass | quote}} + {{- end }} + {{- if .Values.longhornDriver.tolerations }} + tolerations: +{{ toYaml .Values.longhornDriver.tolerations | indent 6 }} + {{- end }} + {{- if .Values.longhornDriver.nodeSelector }} + nodeSelector: +{{ toYaml .Values.longhornDriver.nodeSelector | indent 8 }} + {{- end }} + serviceAccountName: longhorn-service-account + securityContext: + runAsUser: 0 diff --git a/charts/longhorn/longhorn/1.1.100/templates/deployment-ui.yaml b/charts/longhorn/longhorn/1.1.100/templates/deployment-ui.yaml new file mode 100755 index 000000000..e46a84213 --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/templates/deployment-ui.yaml @@ -0,0 +1,72 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: {{- include "longhorn.labels" . | nindent 4 }} + app: longhorn-ui + name: longhorn-ui + namespace: {{ include "release_namespace" . }} +spec: + replicas: 1 + selector: + matchLabels: + app: longhorn-ui + template: + metadata: + labels: {{- include "longhorn.labels" . 
| nindent 8 }}
+        app: longhorn-ui
+    spec:
+      containers:
+      - name: longhorn-ui
+        image: {{ template "registry_url" . }}{{ .Values.image.longhorn.ui.repository }}:{{ .Values.image.longhorn.ui.tag }}
+        imagePullPolicy: IfNotPresent
+        securityContext:
+          runAsUser: 0
+        ports:
+        - containerPort: 8000
+          name: http
+        env:
+          - name: LONGHORN_MANAGER_IP
+            value: "http://longhorn-backend:9500"
+      {{- if .Values.privateRegistry.registrySecret }}
+      imagePullSecrets:
+      - name: {{ .Values.privateRegistry.registrySecret }}
+      {{- end }}
+      {{- if .Values.longhornUI.priorityClass }}
+      priorityClassName: {{ .Values.longhornUI.priorityClass | quote}}
+      {{- end }}
+      {{- if .Values.longhornUI.tolerations }}
+      tolerations:
+{{ toYaml .Values.longhornUI.tolerations | indent 6 }}
+      {{- end }}
+      {{- if .Values.longhornUI.nodeSelector }}
+      nodeSelector:
+{{ toYaml .Values.longhornUI.nodeSelector | indent 8 }}
+      {{- end }}
+---
+kind: Service
+apiVersion: v1
+metadata:
+  labels: {{- include "longhorn.labels" . | nindent 4 }}
+    app: longhorn-ui
+  {{- if eq .Values.service.ui.type "Rancher-Proxy" }}
+    kubernetes.io/cluster-service: "true"
+  {{- end }}
+  name: longhorn-frontend
+  namespace: {{ include "release_namespace" .
}} +spec: + {{- if eq .Values.service.ui.type "Rancher-Proxy" }} + type: ClusterIP + {{- else }} + type: {{ .Values.service.ui.type }} + {{- end }} + selector: + app: longhorn-ui + ports: + - name: http + port: 80 + targetPort: http + {{- if .Values.service.ui.nodePort }} + nodePort: {{ .Values.service.ui.nodePort }} + {{- else }} + nodePort: null + {{- end }} diff --git a/charts/longhorn/longhorn/1.1.100/templates/ingress.yaml b/charts/longhorn/longhorn/1.1.100/templates/ingress.yaml new file mode 100755 index 000000000..13555f814 --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/templates/ingress.yaml @@ -0,0 +1,34 @@ +{{- if .Values.ingress.enabled }} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: longhorn-ingress + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} + app: longhorn-ingress + annotations: + {{- if .Values.ingress.tls }} + ingress.kubernetes.io/secure-backends: "true" + {{- end }} + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + {{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} + {{- end }} + rules: + - host: {{ .Values.ingress.host }} + http: + paths: + - path: {{ default "" .Values.ingress.path }} + backend: + serviceName: longhorn-frontend + servicePort: 80 +{{- if .Values.ingress.tls }} + tls: + - hosts: + - {{ .Values.ingress.host }} + secretName: {{ .Values.ingress.tlsSecret }} +{{- end }} +{{- end }} diff --git a/charts/longhorn/longhorn/1.1.100/templates/postupgrade-job.yaml b/charts/longhorn/longhorn/1.1.100/templates/postupgrade-job.yaml new file mode 100755 index 000000000..4af75e236 --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/templates/postupgrade-job.yaml @@ -0,0 +1,48 @@ +apiVersion: batch/v1 +kind: Job +metadata: + annotations: + "helm.sh/hook": post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation + name: 
longhorn-post-upgrade + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} +spec: + activeDeadlineSeconds: 900 + backoffLimit: 1 + template: + metadata: + name: longhorn-post-upgrade + labels: {{- include "longhorn.labels" . | nindent 8 }} + spec: + containers: + - name: longhorn-post-upgrade + image: {{ template "registry_url" . }}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + command: + - longhorn-manager + - post-upgrade + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + restartPolicy: OnFailure + {{- if .Values.privateRegistry.registrySecret }} + imagePullSecrets: + - name: {{ .Values.privateRegistry.registrySecret }} + {{- end }} + {{- if .Values.longhornManager.priorityClass }} + priorityClassName: {{ .Values.longhornManager.priorityClass | quote}} + {{- end }} + serviceAccountName: longhorn-service-account + {{- if .Values.longhornManager.tolerations }} + tolerations: +{{ toYaml .Values.longhornManager.tolerations | indent 6 }} + {{- end }} + {{- if .Values.longhornManager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }} + {{- end }} diff --git a/charts/longhorn/longhorn/1.1.100/templates/psp.yaml b/charts/longhorn/longhorn/1.1.100/templates/psp.yaml new file mode 100755 index 000000000..a2dfc05be --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/templates/psp.yaml @@ -0,0 +1,66 @@ +{{- if .Values.enablePSP }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: longhorn-psp + labels: {{- include "longhorn.labels" . 
| nindent 4 }} +spec: + privileged: true + allowPrivilegeEscalation: true + requiredDropCapabilities: + - NET_RAW + allowedCapabilities: + - SYS_ADMIN + hostNetwork: false + hostIPC: false + hostPID: true + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + fsGroup: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - configMap + - downwardAPI + - emptyDir + - secret + - projected + - hostPath +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: longhorn-psp-role + labels: {{- include "longhorn.labels" . | nindent 4 }} + namespace: {{ include "release_namespace" . }} +rules: +- apiGroups: + - policy + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - longhorn-psp +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: longhorn-psp-binding + labels: {{- include "longhorn.labels" . | nindent 4 }} + namespace: {{ include "release_namespace" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: longhorn-psp-role +subjects: +- kind: ServiceAccount + name: longhorn-service-account + namespace: {{ include "release_namespace" . }} +- kind: ServiceAccount + name: default + namespace: {{ include "release_namespace" . }} +{{- end }} diff --git a/charts/longhorn/longhorn/1.1.100/templates/registry-secret.yml b/charts/longhorn/longhorn/1.1.100/templates/registry-secret.yml new file mode 100755 index 000000000..1c7565fea --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/templates/registry-secret.yml @@ -0,0 +1,11 @@ +{{- if .Values.privateRegistry.registrySecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .Values.privateRegistry.registrySecret }} + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ template "secret" . 
}} +{{- end }} \ No newline at end of file diff --git a/charts/longhorn/longhorn/1.1.100/templates/serviceaccount.yaml b/charts/longhorn/longhorn/1.1.100/templates/serviceaccount.yaml new file mode 100755 index 000000000..ad576c353 --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/templates/serviceaccount.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: longhorn-service-account + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} diff --git a/charts/longhorn/longhorn/1.1.100/templates/storageclass.yaml b/charts/longhorn/longhorn/1.1.100/templates/storageclass.yaml new file mode 100755 index 000000000..dea6aafd4 --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/templates/storageclass.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: longhorn-storageclass + namespace: {{ include "release_namespace" . }} + labels: {{- include "longhorn.labels" . | nindent 4 }} +data: + storageclass.yaml: | + kind: StorageClass + apiVersion: storage.k8s.io/v1 + metadata: + name: longhorn + annotations: + storageclass.kubernetes.io/is-default-class: {{ .Values.persistence.defaultClass | quote }} + provisioner: driver.longhorn.io + allowVolumeExpansion: true + reclaimPolicy: "{{ .Values.persistence.reclaimPolicy }}" + volumeBindingMode: Immediate + parameters: + numberOfReplicas: "{{ .Values.persistence.defaultClassReplicaCount }}" + staleReplicaTimeout: "30" + fromBackup: "" + baseImage: "" + {{- if .Values.persistence.recurringJobs.enable }} + recurringJobs: '{{ .Values.persistence.recurringJobs.jobList }}' + {{- end }} diff --git a/charts/longhorn/longhorn/1.1.100/templates/tls-secrets.yaml b/charts/longhorn/longhorn/1.1.100/templates/tls-secrets.yaml new file mode 100755 index 000000000..a7ebf13e0 --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/templates/tls-secrets.yaml @@ -0,0 +1,16 @@ +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.secrets }} 
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ .name }}
+  namespace: {{ include "release_namespace" $ }}
+  labels: {{- include "longhorn.labels" $ | nindent 4 }}
+    app: longhorn
+type: kubernetes.io/tls
+data:
+  tls.crt: {{ .certificate | b64enc }}
+  tls.key: {{ .key | b64enc }}
+---
+{{- end }}
+{{- end }}
diff --git a/charts/longhorn/longhorn/1.1.100/templates/uninstall-job.yaml b/charts/longhorn/longhorn/1.1.100/templates/uninstall-job.yaml
new file mode 100755
index 000000000..5f21b1024
--- /dev/null
+++ b/charts/longhorn/longhorn/1.1.100/templates/uninstall-job.yaml
@@ -0,0 +1,49 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  annotations:
+    "helm.sh/hook": pre-delete
+    "helm.sh/hook-delete-policy": hook-succeeded
+  name: longhorn-uninstall
+  namespace: {{ include "release_namespace" . }}
+  labels: {{- include "longhorn.labels" . | nindent 4 }}
+spec:
+  activeDeadlineSeconds: 900
+  backoffLimit: 1
+  template:
+    metadata:
+      name: longhorn-uninstall
+      labels: {{- include "longhorn.labels" . | nindent 8 }}
+    spec:
+      containers:
+      - name: longhorn-uninstall
+        image: {{ template "registry_url" .
}}{{ .Values.image.longhorn.manager.repository }}:{{ .Values.image.longhorn.manager.tag }} + imagePullPolicy: IfNotPresent + securityContext: + privileged: true + command: + - longhorn-manager + - uninstall + - --force + env: + - name: LONGHORN_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + restartPolicy: OnFailure + {{- if .Values.privateRegistry.registrySecret }} + imagePullSecrets: + - name: {{ .Values.privateRegistry.registrySecret }} + {{- end }} + {{- if .Values.longhornManager.priorityClass }} + priorityClassName: {{ .Values.longhornManager.priorityClass | quote}} + {{- end }} + serviceAccountName: longhorn-service-account + {{- if .Values.longhornManager.tolerations }} + tolerations: +{{ toYaml .Values.longhornManager.tolerations | indent 6 }} + {{- end }} + {{- if .Values.longhornManager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.longhornManager.nodeSelector | indent 8 }} + {{- end }} diff --git a/charts/longhorn/longhorn/1.1.100/templates/userroles.yaml b/charts/longhorn/longhorn/1.1.100/templates/userroles.yaml new file mode 100755 index 000000000..00dda116a --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/templates/userroles.yaml @@ -0,0 +1,38 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: "longhorn-admin" + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: [ "longhorn.io" ] + resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings", + "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status", + "sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status", "backingimagemanagers", "backingimagemanagers/status"] + verbs: [ "*" ] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: "longhorn-edit" + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" +rules: + - apiGroups: [ 
"longhorn.io" ] + resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings", + "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status", + "sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status", "backingimagemanagers", "backingimagemanagers/status"] + verbs: [ "*" ] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: "longhorn-view" + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" +rules: + - apiGroups: [ "longhorn.io" ] + resources: ["volumes", "volumes/status", "engines", "engines/status", "replicas", "replicas/status", "settings", + "engineimages", "engineimages/status", "nodes", "nodes/status", "instancemanagers", "instancemanagers/status", + "sharemanagers", "sharemanagers/status", "backingimages", "backingimages/status", "backingimagemanagers", "backingimagemanagers/status"] + verbs: [ "get", "list", "watch" ] diff --git a/charts/longhorn/longhorn/1.1.100/templates/validate-install-crd.yaml b/charts/longhorn/longhorn/1.1.100/templates/validate-install-crd.yaml new file mode 100755 index 000000000..f93413640 --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/templates/validate-install-crd.yaml @@ -0,0 +1,23 @@ +#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}} +# {{- $found := dict -}} +# {{- set $found "longhorn.io/v1beta1/Engine" false -}} +# {{- set $found "longhorn.io/v1beta1/Replica" false -}} +# {{- set $found "longhorn.io/v1beta1/Setting" false -}} +# {{- set $found "longhorn.io/v1beta1/Volume" false -}} +# {{- set $found "longhorn.io/v1beta1/EngineImage" false -}} +# {{- set $found "longhorn.io/v1beta1/Node" false -}} +# {{- set $found "longhorn.io/v1beta1/InstanceManager" false -}} +# {{- set $found "longhorn.io/v1beta1/ShareManager" false -}} +# {{- set $found "longhorn.io/v1beta1/BackingImage" false -}} +# {{- set $found 
"longhorn.io/v1beta1/BackingImageManager" false -}} +# {{- range .Capabilities.APIVersions -}} +# {{- if hasKey $found (toString .) -}} +# {{- set $found (toString .) true -}} +# {{- end -}} +# {{- end -}} +# {{- range $_, $exists := $found -}} +# {{- if (eq $exists false) -}} +# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}} +# {{- end -}} +# {{- end -}} +#{{- end -}} \ No newline at end of file diff --git a/charts/longhorn/longhorn/1.1.100/values.yaml b/charts/longhorn/longhorn/1.1.100/values.yaml new file mode 100755 index 000000000..3032ccb87 --- /dev/null +++ b/charts/longhorn/longhorn/1.1.100/values.yaml @@ -0,0 +1,220 @@ +# Default values for longhorn. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +global: + cattle: + systemDefaultRegistry: "" + +image: + longhorn: + engine: + repository: rancher/mirrored-longhornio-longhorn-engine + tag: v1.1.1 + manager: + repository: rancher/mirrored-longhornio-longhorn-manager + tag: v1.1.1 + ui: + repository: rancher/mirrored-longhornio-longhorn-ui + tag: v1.1.1 + instanceManager: + repository: rancher/mirrored-longhornio-longhorn-instance-manager + tag: v1_20201216 + shareManager: + repository: rancher/mirrored-longhornio-longhorn-share-manager + tag: v1_20210416 + backingImageManager: + repository: rancher/mirrored-longhornio-backing-image-manager + tag: v1_20210422 + csi: + attacher: + repository: rancher/mirrored-longhornio-csi-attacher + tag: v2.2.1-lh1 + provisioner: + repository: rancher/mirrored-longhornio-csi-provisioner + tag: v1.6.0-lh1 + nodeDriverRegistrar: + repository: rancher/mirrored-longhornio-csi-node-driver-registrar + tag: v1.2.0-lh1 + resizer: + repository: rancher/mirrored-longhornio-csi-resizer + tag: v0.5.1-lh1 + snapshotter: + repository: rancher/mirrored-longhornio-csi-snapshotter + tag: v2.1.1-lh1 + pullPolicy: IfNotPresent + +service: + ui: + type: ClusterIP + nodePort: null 
+ manager: + type: ClusterIP + nodePort: "" + +persistence: + defaultClass: true + defaultClassReplicaCount: 3 + reclaimPolicy: Delete + recurringJobs: + enable: false + jobList: [] + +csi: + kubeletRootDir: ~ + attacherReplicaCount: ~ + provisionerReplicaCount: ~ + resizerReplicaCount: ~ + snapshotterReplicaCount: ~ + +defaultSettings: + backupTarget: ~ + backupTargetCredentialSecret: ~ + allowRecurringJobWhileVolumeDetached: ~ + createDefaultDiskLabeledNodes: ~ + defaultDataPath: ~ + defaultDataLocality: ~ + replicaSoftAntiAffinity: ~ + storageOverProvisioningPercentage: ~ + storageMinimalAvailablePercentage: ~ + upgradeChecker: ~ + defaultReplicaCount: ~ + guaranteedEngineCPU: ~ + defaultLonghornStaticStorageClass: ~ + backupstorePollInterval: ~ + taintToleration: ~ + systemManagedComponentsNodeSelector: ~ + priorityClass: ~ + autoSalvage: ~ + autoDeletePodWhenVolumeDetachedUnexpectedly: ~ + disableSchedulingOnCordonedNode: ~ + replicaZoneSoftAntiAffinity: ~ + volumeAttachmentRecoveryPolicy: ~ + nodeDownPodDeletionPolicy: ~ + allowNodeDrainWithLastHealthyReplica: ~ + mkfsExt4Parameters: ~ + disableReplicaRebuild: ~ + replicaReplenishmentWaitInterval: ~ + disableRevisionCounter: ~ + systemManagedPodsImagePullPolicy: ~ + allowVolumeCreationWithDegradedAvailability: ~ + autoCleanupSystemGeneratedSnapshot: ~ + concurrentAutomaticEngineUpgradePerNodeLimit: ~ + backingImageCleanupWaitInterval: ~ + guaranteedEngineManagerCPU: ~ + guaranteedReplicaManagerCPU: ~ +privateRegistry: + registryUrl: ~ + registryUser: ~ + registryPasswd: ~ + registrySecret: ~ + +longhornManager: + priorityClass: ~ + tolerations: [] + ## If you want to set tolerations for Longhorn Manager DaemonSet, delete the `[]` in the line above + ## and uncomment this example block + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + nodeSelector: {} + ## If you want to set node selector for Longhorn Manager DaemonSet, delete the `{}` in the line above + ## and uncomment 
this example block + # label-key1: "label-value1" + # label-key2: "label-value2" + +longhornDriver: + priorityClass: ~ + tolerations: [] + ## If you want to set tolerations for Longhorn Driver Deployer Deployment, delete the `[]` in the line above + ## and uncomment this example block + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + nodeSelector: {} + ## If you want to set node selector for Longhorn Driver Deployer Deployment, delete the `{}` in the line above + ## and uncomment this example block + # label-key1: "label-value1" + # label-key2: "label-value2" + +longhornUI: + priorityClass: ~ + tolerations: [] + ## If you want to set tolerations for Longhorn UI Deployment, delete the `[]` in the line above + ## and uncomment this example block + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + nodeSelector: {} + ## If you want to set node selector for Longhorn UI Deployment, delete the `{}` in the line above + ## and uncomment this example block + # label-key1: "label-value1" + # label-key2: "label-value2" + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + # + +ingress: + ## Set to true to enable ingress record generation + enabled: false + + ## Add ingressClassName to the Ingress + ## Can replace the kubernetes.io/ingress.class annotation on v1.18+ + ingressClassName: ~ + + host: xip.io + + ## Set this to true in order to enable TLS on the ingress record + ## A side effect of this will be that the backend service will be connected at port 443 + tls: false + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: longhorn.local-tls + + ## Ingress annotations done as key:value pairs + ## If you're using kube-lego, you will want to add: + ## kubernetes.io/tls-acme: true + ## + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + annotations: + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: true + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using kube-lego, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: longhorn.local-tls + # key: + # certificate: + +# Configure a pod security policy in the Longhorn namespace to allow privileged pods +enablePSP: true + +## Specify override namespace, specifically this is useful for using longhorn as sub-chart +## and its release namespace is not the `longhorn-system` +namespaceOverride: "" + 
+# Annotations to add to the Longhorn Manager DaemonSet Pods. Optional. +annotations: {} diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/Chart.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/Chart.yaml new file mode 100755 index 000000000..03fb6469b --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/Chart.yaml @@ -0,0 +1,21 @@ +annotations: + catalog.cattle.io/certified: rancher + catalog.cattle.io/display-name: Alerting Drivers + catalog.cattle.io/os: linux + catalog.cattle.io/release-name: rancher-alerting-drivers +apiVersion: v2 +appVersion: 1.16.0 +dependencies: +- condition: prom2teams.enabled + name: prom2teams + repository: file://./charts/prom2teams +- condition: sachet.enabled + name: sachet + repository: file://./charts/sachet +description: The manager for third-party webhook receivers used in Prometheus Alertmanager +keywords: +- monitoring +- alertmanger +- webhook +name: rancher-alerting-drivers +version: 1.0.100 diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/app-readme.md b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/app-readme.md new file mode 100755 index 000000000..ea3f11801 --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/app-readme.md @@ -0,0 +1,11 @@ +# Rancher Alerting Drivers + +This chart installs one or more [Alertmanager Webhook Receiver Integrations](https://prometheus.io/docs/operating/integrations/#alertmanager-webhook-receiver) (i.e. Drivers). + +Those Drivers can be targeted by an existing deployment of Alertmanager to send alerts to notification mechanisms that are not natively supported. 
+ +Currently, this chart supports the following Drivers: +- Microsoft Teams, based on [prom2teams](https://github.com/idealista/prom2teams) +- SMS, based on [Sachet](https://github.com/messagebird/sachet) + +After installing rancher-alerting-drivers, please refer to the upstream documentation for each Driver for configuration options. \ No newline at end of file diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/.helmignore b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/.helmignore new file mode 100755 index 000000000..50af03172 --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/Chart.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/Chart.yaml new file mode 100755 index 000000000..463385d4b --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/Chart.yaml @@ -0,0 +1,10 @@ +annotations: + catalog.cattle.io/certified: rancher + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.cattle.io/release-name: rancher-prom2teams +apiVersion: v1 +appVersion: 3.2.1 +description: A Helm chart for Prom2Teams based on the upstream https://github.com/idealista/prom2teams +name: prom2teams +version: 0.2.0 diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/files/teams.j2 
b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/files/teams.j2 new file mode 100755 index 000000000..f1cf61d4e --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/files/teams.j2 @@ -0,0 +1,44 @@ +{%- set + theme_colors = { + 'resolved' : '2DC72D', + 'critical' : '8C1A1A', + 'severe' : '8C1A1A', + 'warning' : 'FF9A0B', + 'unknown' : 'CCCCCC' + } +-%} + +{ + "@type": "MessageCard", + "@context": "http://schema.org/extensions", + "themeColor": "{% if status=='resolved' %} {{ theme_colors.resolved }} {% else %} {{ theme_colors[msg_text.severity] }} {% endif %}", + "summary": "{% if status=='resolved' %}(Resolved) {% endif %}{{ msg_text.summary }}", + "title": "Prometheus alert {% if status=='resolved' %}(Resolved) {% elif status=='unknown' %} (status unknown) {% endif %}", + "sections": [{ + "activityTitle": "{{ msg_text.summary }}", + "facts": [{% if msg_text.name %}{ + "name": "Alert", + "value": "{{ msg_text.name }}" + },{% endif %}{% if msg_text.instance %}{ + "name": "In host", + "value": "{{ msg_text.instance }}" + },{% endif %}{% if msg_text.severity %}{ + "name": "Severity", + "value": "{{ msg_text.severity }}" + },{% endif %}{% if msg_text.description %}{ + "name": "Description", + "value": "{{ msg_text.description }}" + },{% endif %}{ + "name": "Status", + "value": "{{ msg_text.status }}" + }{% if msg_text.extra_labels %}{% for key in msg_text.extra_labels %},{ + "name": "{{ key }}", + "value": "{{ msg_text.extra_labels[key] }}" + }{% endfor %}{% endif %} + {% if msg_text.extra_annotations %}{% for key in msg_text.extra_annotations %},{ + "name": "{{ key }}", + "value": "{{ msg_text.extra_annotations[key] }}" + }{% endfor %}{% endif %}], + "markdown": true + }] +} diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/NOTES.txt 
b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/NOTES.txt new file mode 100755 index 000000000..a94c4132b --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/NOTES.txt @@ -0,0 +1,2 @@ +Prom2Teams has been installed. Check its status by running: + kubectl --namespace {{ .Release.Namespace }} get pods -l "app.kubernetes.io/instance={{ .Release.Name }}" diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/_helpers.tpl b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/_helpers.tpl new file mode 100755 index 000000000..ffc0fa356 --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/_helpers.tpl @@ -0,0 +1,73 @@ +{{/* vim: set filetype=mustache: */}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "prom2teams.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+If release name contains chart name it will be used as a full name. +*/}} +{{- define "prom2teams.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "prom2teams.namespace" -}} +{{ default .Release.Namespace .Values.global.namespaceOverride }} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "prom2teams.labels" -}} +app.kubernetes.io/name: {{ include "prom2teams.name" . }} +helm.sh/chart: {{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +app.kubernetes.io/instance: {{ .Release.Name }} +release: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/configmap.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/configmap.yaml new file mode 100755 index 000000000..ccf38953e --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/configmap.yaml @@ -0,0 +1,39 @@ +{{- $valid := list "DEBUG" "INFO" "WARNING" "ERROR" "CRITICAL" -}} +{{- if not (has .Values.prom2teams.loglevel $valid) -}} +{{- fail "Invalid log level"}} +{{- end -}} +{{- if and .Values.prom2teams.connector (hasKey .Values.prom2teams.connectors "Connector") -}} +{{- fail "Invalid configuration: prom2teams.connectors can't have a connector named Connector 
when prom2teams.connector is set"}} +{{- end -}} +{{/* Create the configmap when the operation is helm install and the target configmap does not exist. */}} +{{- if not (lookup "v1" "ConfigMap" (include "prom2teams.namespace" . ) (include "prom2teams.fullname" .)) }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ include "prom2teams.namespace" . }} + name: {{ include "prom2teams.fullname" . }} + labels: {{ include "prom2teams.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": pre-install, pre-upgrade + "helm.sh/hook-weight": "3" + "helm.sh/resource-policy": keep +data: + config.ini: |- + [HTTP Server] + Host: {{ .Values.prom2teams.host }} + Port: {{ .Values.prom2teams.port }} + [Microsoft Teams] + {{- with .Values.prom2teams.connector }} + Connector: {{ . }} + {{- end }} + {{- range $key, $val := .Values.prom2teams.connectors }} + {{ $key }}: {{ $val }} + {{- end }} + [Group Alerts] + Field: {{ .Values.prom2teams.group_alerts_by }} + [Log] + Level: {{ .Values.prom2teams.loglevel }} + [Template] + Path: {{ .Values.prom2teams.templatepath }} + teams.j2: {{ .Files.Get "files/teams.j2" | quote }} + {{- end -}} diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/deployment.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/deployment.yaml new file mode 100755 index 000000000..c7149b9da --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/deployment.yaml @@ -0,0 +1,77 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "prom2teams.fullname" . }} + namespace: {{ include "prom2teams.namespace" . }} + labels: {{ include "prom2teams.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "prom2teams.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "prom2teams.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + spec: + serviceAccountName: {{ include "prom2teams.fullname" . }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: config + configMap: + name: {{ include "prom2teams.fullname" . }} + containers: + - name: {{ .Chart.Name }} + image: {{ include "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 8089 + protocol: TCP + volumeMounts: + - name: config + mountPath: /opt/prom2teams/helmconfig/ + env: + - name: APP_CONFIG_FILE + value: {{ .Values.prom2teams.config | quote }} + - name: PROM2TEAMS_PORT + value: {{ .Values.prom2teams.port | quote }} + - name: PROM2TEAMS_HOST + value: {{ .Values.prom2teams.ip | quote }} + - name: PROM2TEAMS_CONNECTOR + value: {{ .Values.prom2teams.connector | quote }} + - name: PROM2TEAMS_GROUP_ALERTS_BY + value: {{ .Values.prom2teams.group_alerts_by | quote }} + resources: {{ toYaml .Values.resources | nindent 12 }} + {{- if .Values.securityContext.enabled }} + securityContext: + privileged: false + readOnlyRootFilesystem: false + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + {{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} + {{- if .Values.nodeSelector }} + {{- toYaml .Values.nodeSelector | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: {{ toYaml . | nindent 8 }} + {{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} + {{- if .Values.tolerations }} + {{- toYaml .Values.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsNonRoot: {{ if eq (int .Values.securityContext.runAsUser) 0 }}false{{ else }}true{{ end }} + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/psp.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/psp.yaml new file mode 100755 index 000000000..37f21f52a --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/psp.yaml @@ -0,0 +1,28 @@ +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "prom2teams.fullname" . }}-psp + labels: {{ include "prom2teams.labels" . | nindent 4 }} +spec: + privileged: false + allowPrivilegeEscalation: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'configMap' diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/role.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/role.yaml new file mode 100755 index 000000000..25391d588 --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/role.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "prom2teams.fullname" . }}-psp + namespace: {{ include "prom2teams.namespace" . 
}} + labels: {{ include "prom2teams.labels" . | nindent 4 }} +rules: + - apiGroups: + - policy + resourceNames: + - {{ include "prom2teams.fullname" . }}-psp + resources: + - podsecuritypolicies + verbs: + - use diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/rolebinding.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/rolebinding.yaml new file mode 100755 index 000000000..3ca8bc252 --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/rolebinding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "prom2teams.fullname" . }}-psp + namespace: {{ include "prom2teams.namespace" . }} + labels: {{ include "prom2teams.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "prom2teams.fullname" . }}-psp +subjects: + - kind: ServiceAccount + name: {{ include "prom2teams.fullname" . }} diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/service-account.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/service-account.yaml new file mode 100755 index 000000000..a9572c5cd --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/service-account.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "prom2teams.fullname" . }} + namespace: {{ include "prom2teams.namespace" . }} + labels: {{ include "prom2teams.labels" . 
| nindent 4 }} diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/service.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/service.yaml new file mode 100755 index 000000000..cc95cad35 --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/templates/service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "prom2teams.fullname" . }} + namespace: {{ include "prom2teams.namespace" . }} + labels: +{{ include "prom2teams.labels" . | indent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: 8089 + protocol: TCP + name: http + selector: + app.kubernetes.io/name: {{ include "prom2teams.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/values.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/values.yaml new file mode 100755 index 000000000..dcbbd8cfa --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/prom2teams/values.yaml @@ -0,0 +1,62 @@ +# Default values for prom2teams. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +global: + cattle: + systemDefaultRegistry: "" + namespaceOverride: "" + +nameOverride: "prom2teams" +fullnameOverride: "" + +replicaCount: 1 + +image: + repository: rancher/mirrored-idealista-prom2teams + tag: 3.2.1 + pullPolicy: IfNotPresent + +resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 200m + memory: 200Mi + +service: + type: ClusterIP + port: 8089 + +prom2teams: + host: 0.0.0.0 + port: 8089 + connector: the-connector-url + connectors: {} + # group_alerts_by can be one of + # ("name" | "description" | "instance" | "severity" | "status" | "summary" | "fingerprint" | "runbook_url") + group_alerts_by: + # loglevel can be one of (DEBUG | INFO | WARNING | ERROR | CRITICAL) + loglevel: INFO + templatepath: /opt/prom2teams/helmconfig/teams.j2 + config: /opt/prom2teams/helmconfig/config.ini + +# Security Context properties +securityContext: + # enabled is a flag to enable Security Context + enabled: true + # runAsUser is the user ID used to run the container + runAsUser: 65534 + # runAsGroup is the primary group ID used to run all processes within any container of the pod + runAsGroup: 65534 + # fsGroup is the group ID associated with the container + fsGroup: 65534 + # readOnlyRootFilesystem is a flag to enable readOnlyRootFilesystem for the Hazelcast security context + readOnlyRootFilesystem: true + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/.helmignore b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/.helmignore new file mode 100755 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/Chart.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/Chart.yaml new file mode 100755 index 000000000..493bd9d9e --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/Chart.yaml @@ -0,0 +1,11 @@ +annotations: + catalog.cattle.io/certified: rancher + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.cattle.io/release-name: rancher-sachet +apiVersion: v2 +appVersion: 0.2.3 +description: A Helm chart for Sachet based on the upstream https://github.com/messagebird/sachet +name: sachet +type: application +version: 1.0.1 diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/files/template.tmpl b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/files/template.tmpl new file mode 100755 index 000000000..08f24e138 --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/files/template.tmpl @@ -0,0 +1 @@ +# reference: https://github.com/messagebird/sachet/blob/master/examples/telegram.tmpl diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/NOTES.txt b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/NOTES.txt new file mode 100755 index 000000000..247a91fc1 --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/NOTES.txt @@ -0,0 +1,3 @@ +rancher-sachet is now installed on the cluster! 
+Please refer to the upstream documentation for configuration options: +https://github.com/messagebird/sachet diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/_helpers.tpl b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/_helpers.tpl new file mode 100755 index 000000000..eaa61fee5 --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/_helpers.tpl @@ -0,0 +1,79 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "sachet.namespace" -}} +{{ default .Release.Namespace .Values.global.namespaceOverride }} +{{- end }} + +{{/* +Expand the name of the chart. +*/}} +{{- define "sachet.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "sachet.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "sachet.labels" -}} +helm.sh/chart: {{ printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{ include "sachet.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "sachet.selectorLabels" -}} +app.kubernetes.io/name: {{ include "sachet.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + + diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/configmap-pre-install.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/configmap-pre-install.yaml new file mode 100755 index 000000000..e8c63ac03 --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/configmap-pre-install.yaml @@ -0,0 +1,34 @@ +{{/*This file is applied when the operation is helm install and the target confimap does not exist. */}} +{{- if not (lookup "v1" "ConfigMap" (include "sachet.namespace" . ) (include "sachet.fullname" .)) }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ include "sachet.namespace" . }} + name: {{ include "sachet.fullname" . }} + labels: {{ include "sachet.labels" . 
| nindent 4 }} + annotations: + "helm.sh/hook": pre-install, pre-upgrade + "helm.sh/hook-weight": "3" + "helm.sh/resource-policy": keep +data: + config.yaml: |- + {{- if and (not .Values.sachet.providers) (not .Values.sachet.receivers) }} + # please refer to the upstream documentation for configuration options: + # https://github.com/messagebird/sachet + # + # providers: + # aliyun: + # region_id: + # ... + # receivers: + # - name: 'team-sms' + # provider: 'aliyu' + # ... + {{- end }} + {{- with .Values.sachet.providers }} + providers: {{ toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sachet.receivers }} + receivers: {{ toYaml . | nindent 6 }} + {{- end }} +{{- end }} diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/deployment.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/deployment.yaml new file mode 100755 index 000000000..17215eebd --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/deployment.yaml @@ -0,0 +1,75 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "sachet.fullname" . }} + namespace: {{ include "sachet.namespace" . }} + labels: {{ include "sachet.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: {{ include "sachet.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: {{ toYaml . | nindent 8 }} + {{- end }} + labels: {{ include "sachet.selectorLabels" . | nindent 8 }} + spec: + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} + {{- if .Values.nodeSelector }} + {{- toYaml .Values.nodeSelector | nindent 8 }} + {{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} + {{- if .Values.tolerations }} + {{- toYaml .Values.tolerations | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: {{ toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: {{ toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "sachet.fullname" . }} + {{- with .Values.podSecurityContext }} + securityContext: {{ toYaml .Values.podSecurityContext | nindent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + securityContext: {{ toYaml .Values.securityContext | nindent 12 }} + image: {{ include "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 9876 + protocol: TCP + livenessProbe: + httpGet: + path: /-/live + port: http + readinessProbe: + httpGet: + path: /-/ready + port: http + volumeMounts: + - mountPath: /etc/sachet/ + name: config-volume + {{- with .Values.resources }} + resources: {{ toYaml .Values.resources | nindent 12 }} + {{- end }} + - name: config-reloader + securityContext: {{ toYaml .Values.securityContext | nindent 12 }} + image: {{ include "system_default_registry" . }}{{ .Values.configReloader.repository }}:{{ .Values.configReloader.tag }} + imagePullPolicy: {{ .Values.configReloader.pullPolicy }} + args: + - -volume-dir=/watch-config + - -webhook-method=POST + - -webhook-status-code=200 + - -webhook-url=http://127.0.0.1:{{ .Values.service.port }}/-/reload + volumeMounts: + - mountPath: /watch-config + name: config-volume + volumes: + - name: config-volume + configMap: + name: {{ include "sachet.fullname" . 
}} + defaultMode: 0777 diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/psp.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/psp.yaml new file mode 100755 index 000000000..1cc5b0895 --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/psp.yaml @@ -0,0 +1,28 @@ +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "sachet.fullname" . }}-psp + labels: {{ include "sachet.labels" . | nindent 4 }} +spec: + privileged: false + allowPrivilegeEscalation: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'configMap' diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/role.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/role.yaml new file mode 100755 index 000000000..05d4410e3 --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/role.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "sachet.fullname" . }}-psp + namespace: {{ include "sachet.namespace" . }} + labels: {{ include "sachet.labels" . | nindent 4 }} +rules: + - apiGroups: + - policy + resourceNames: + - {{ include "sachet.fullname" . 
}}-psp + resources: + - podsecuritypolicies + verbs: + - use diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/rolebinding.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/rolebinding.yaml new file mode 100755 index 000000000..174f0d9e8 --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/rolebinding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "sachet.fullname" . }}-psp + namespace: {{ include "sachet.namespace" . }} + labels: {{ include "sachet.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "sachet.fullname" . }}-psp +subjects: + - kind: ServiceAccount + name: {{ include "sachet.fullname" . }} diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/service-account.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/service-account.yaml new file mode 100755 index 000000000..8833f1b3b --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/service-account.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "sachet.fullname" . }} + namespace: {{ include "sachet.namespace" . }} + labels: {{ include "sachet.labels" . | nindent 4 }} diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/service.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/service.yaml new file mode 100755 index 000000000..216e8322c --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/templates/service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "sachet.fullname" . 
}} + namespace: {{ include "sachet.namespace" . }} + labels: {{ include "sachet.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + {{- if contains "NodePort" .Values.service.type }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + selector: {{ include "sachet.selectorLabels" . | nindent 4 }} diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/values.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/values.yaml new file mode 100755 index 000000000..b00cf0b18 --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/charts/sachet/values.yaml @@ -0,0 +1,63 @@ +# Default values for sachet. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +global: + cattle: + systemDefaultRegistry: "" + namespaceOverride: "" + +nameOverride: "sachet" +fullnameOverride: "" + +configReloader: + repository: rancher/mirrored-jimmidyson-configmap-reload + pullPolicy: IfNotPresent + tag: v0.4.0 + +sachet: + # reference: https://github.com/messagebird/sachet/blob/master/examples/config.yaml + providers: {} + + receivers: [] + +replicaCount: 1 + +image: + repository: rancher/mirrored-messagebird-sachet + pullPolicy: IfNotPresent + tag: 0.2.3 + +imagePullSecrets: [] + +podAnnotations: {} + +podSecurityContext: + +securityContext: + runAsUser: 1000 + runAsNonRoot: true + runAsGroup: 1000 + +service: + type: ClusterIP + port: 9876 + nodePort: 30001 + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/questions.yml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/questions.yml new file mode 100755 index 000000000..741808c23 --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/questions.yml @@ -0,0 +1,14 @@ +categories: + - monitoring +namespace: cattle-monitoring-system +questions: + - variable: prom2teams.enabled + default: false + label: Enable Microsoft Teams + type: boolean + group: "General" + - variable: sachet.enabled + default: false + label: Enable SMS + type: boolean + group: "General" diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/templates/NOTES.txt b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/templates/NOTES.txt new file mode 100755 index 000000000..59c1415e0 --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/templates/NOTES.txt @@ -0,0 +1,2 @@ +rancher-alerting-drivers is now installed on the cluster! +Please refer to the upstream documentation for each Driver for configuration options. \ No newline at end of file diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/templates/_helpers.tpl b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/templates/_helpers.tpl new file mode 100755 index 000000000..e57f6ff74 --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/templates/_helpers.tpl @@ -0,0 +1,91 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "drivers.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+If release name contains chart name it will be used as a full name. +*/}} +{{- define "drivers.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "drivers.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "drivers.labels" -}} +helm.sh/chart: {{ include "drivers.chart" . }} +{{ include "drivers.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "drivers.selectorLabels" -}} +app.kubernetes.io/name: {{ include "drivers.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "drivers.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "drivers.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{/* +https://github.com/helm/helm/issues/4535#issuecomment-477778391 +Usage: {{ include "call-nested" (list . "SUBCHART_NAME" "TEMPLATE") }} +e.g. {{ include "call-nested" (list . "grafana" "grafana.fullname") }} +*/}} +{{- define "call-nested" }} +{{- $dot := index . 0 }} +{{- $subchart := index . 1 | splitList "." }} +{{- $template := index . 2 }} +{{- $values := $dot.Values }} +{{- range $subchart }} +{{- $values = index $values . 
}} +{{- end }} +{{- include $template (dict "Chart" (dict "Name" (last $subchart)) "Values" $values "Release" $dot.Release "Capabilities" $dot.Capabilities) }} +{{- end }} + + +{{/* +Get the list of configMaps to be managed +*/}} +{{- define "drivers.configmapList" -}} +{{- if .Values.sachet.enabled -}} +- {{ include "call-nested" (list . "sachet" "sachet.fullname") }} +{{- end }} +{{- if .Values.prom2teams.enabled -}} +- {{ include "call-nested" (list . "prom2teams" "prom2teams.fullname") }} +{{- end }} +{{- end }} diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/templates/cluster-role.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/templates/cluster-role.yaml new file mode 100755 index 000000000..e3022a7ca --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/templates/cluster-role.yaml @@ -0,0 +1,50 @@ +{{- if and (not .Values.sachet.enabled) (not .Values.prom2teams.enabled) -}} +{{- fail "At least one Driver must be enabled to install the chart. " }} +{{- end -}} + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "drivers.fullname" . }}-admin + labels: {{ include "drivers.labels" . | nindent 4 }} + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: + - "" + resources: + - configmaps + resourceNames: {{ include "drivers.configmapList" . | nindent 6 }} + verbs: + - "*" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "drivers.fullname" . }}-edit + labels: {{ include "drivers.labels" . | nindent 4 }} + rbac.authorization.k8s.io/aggregate-to-edit: "true" +rules: + - apiGroups: + - "" + resources: + - configmaps + resourceNames: {{ include "drivers.configmapList" . | nindent 6 }} + verbs: + - "*" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "drivers.fullname" . }}-view + labels: {{ include "drivers.labels" . 
| nindent 4 }} + rbac.authorization.k8s.io/aggregate-to-view: "true" +rules: + - apiGroups: + - "" + resources: + - configmaps + resourceNames: {{ include "drivers.configmapList" . | nindent 6 }} + verbs: + - 'get' + - 'list' + - 'watch' diff --git a/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/values.yaml b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/values.yaml new file mode 100755 index 000000000..ff9ab90e0 --- /dev/null +++ b/charts/rancher-alerting-drivers/rancher-alerting-drivers/1.0.100/values.yaml @@ -0,0 +1,17 @@ +# Default values for rancher-alerting-driver. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +global: + cattle: + # the registry where all images will be pulled from + systemDefaultRegistry: "" + # set this value if you want the sub-charts to be installed into + # a namespace rather than where this chart is installed + namespaceOverride: "" + +prom2teams: + enabled: false + +sachet: + enabled: false diff --git a/charts/rancher-backup-crd/rancher-backup-crd/1.0.400/Chart.yaml b/charts/rancher-backup-crd/rancher-backup-crd/1.0.400/Chart.yaml new file mode 100755 index 000000000..dd3f43a33 --- /dev/null +++ b/charts/rancher-backup-crd/rancher-backup-crd/1.0.400/Chart.yaml @@ -0,0 +1,11 @@ +annotations: + catalog.cattle.io/certified: rancher + catalog.cattle.io/hidden: "true" + catalog.cattle.io/namespace: cattle-resources-system + catalog.cattle.io/release-name: rancher-backup-crd +apiVersion: v2 +appVersion: 1.0.4 +description: Installs the CRDs for rancher-backup. 
+name: rancher-backup-crd +type: application +version: 1.0.400 diff --git a/charts/rancher-backup-crd/rancher-backup-crd/1.0.400/README.md b/charts/rancher-backup-crd/rancher-backup-crd/1.0.400/README.md new file mode 100755 index 000000000..046410962 --- /dev/null +++ b/charts/rancher-backup-crd/rancher-backup-crd/1.0.400/README.md @@ -0,0 +1,3 @@ +# Rancher Backup CRD + +A Rancher chart that installs the CRDs used by `rancher-backup`. diff --git a/charts/rancher-backup-crd/rancher-backup-crd/1.0.400/templates/backup.yaml b/charts/rancher-backup-crd/rancher-backup-crd/1.0.400/templates/backup.yaml new file mode 100755 index 000000000..a4b9471c0 --- /dev/null +++ b/charts/rancher-backup-crd/rancher-backup-crd/1.0.400/templates/backup.yaml @@ -0,0 +1,119 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: backups.resources.cattle.io +spec: + additionalPrinterColumns: + - JSONPath: .status.storageLocation + name: Location + type: string + - JSONPath: .status.backupType + name: Type + type: string + - JSONPath: .status.filename + name: Latest-Backup + type: string + - JSONPath: .spec.resourceSetName + name: ResourceSet + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + group: resources.cattle.io + names: + kind: Backup + plural: backups + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + properties: + encryptionConfigSecretName: + description: Name of the Secret containing the encryption config + type: string + resourceSetName: + description: Name of the ResourceSet CR to use for backup + type: string + retentionCount: + minimum: 1 + type: integer + schedule: + description: Cron schedule for recurring backups + example: + Descriptors: '@midnight' + Standard crontab specs: 0 0 * * * + type: string + storageLocation: + nullable: true + properties: + s3: + 
nullable: true + properties: + bucketName: + type: string + credentialSecretName: + type: string + credentialSecretNamespace: + type: string + endpoint: + type: string + endpointCA: + type: string + folder: + type: string + insecureTLSSkipVerify: + type: boolean + region: + type: string + type: object + type: object + required: + - resourceSetName + type: object + status: + properties: + backupType: + type: string + conditions: + items: + properties: + lastTransitionTime: + type: string + lastUpdateTime: + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + nullable: true + type: array + filename: + type: string + lastSnapshotTs: + type: string + nextSnapshotAt: + type: string + observedGeneration: + type: integer + storageLocation: + type: string + summary: + type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true diff --git a/charts/rancher-backup-crd/rancher-backup-crd/1.0.400/templates/resourceset.yaml b/charts/rancher-backup-crd/rancher-backup-crd/1.0.400/templates/resourceset.yaml new file mode 100755 index 000000000..665ef786d --- /dev/null +++ b/charts/rancher-backup-crd/rancher-backup-crd/1.0.400/templates/resourceset.yaml @@ -0,0 +1,94 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: resourcesets.resources.cattle.io +spec: + group: resources.cattle.io + names: + kind: ResourceSet + plural: resourcesets + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + controllerReferences: + items: + properties: + apiVersion: + type: string + name: + type: string + namespace: + type: string + replicas: + type: integer + resource: + type: string + type: object + nullable: true + type: array + resourceSelectors: + items: + properties: + apiVersion: + type: string + kinds: + items: + type: string + nullable: true + type: array + kindsRegexp: + type: 
string + labelSelectors: + nullable: true + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + nullable: true + type: array + type: object + nullable: true + type: array + matchLabels: + additionalProperties: + type: string + nullable: true + type: object + type: object + namespaceRegexp: + type: string + namespaces: + items: + type: string + nullable: true + type: array + resourceNameRegexp: + type: string + resourceNames: + items: + type: string + nullable: true + type: array + type: object + nullable: true + required: + - apiVersion + type: array + required: + - resourceSelectors + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true diff --git a/charts/rancher-backup-crd/rancher-backup-crd/1.0.400/templates/restore.yaml b/charts/rancher-backup-crd/rancher-backup-crd/1.0.400/templates/restore.yaml new file mode 100755 index 000000000..1ad7d1721 --- /dev/null +++ b/charts/rancher-backup-crd/rancher-backup-crd/1.0.400/templates/restore.yaml @@ -0,0 +1,102 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: restores.resources.cattle.io +spec: + additionalPrinterColumns: + - JSONPath: .status.backupSource + name: Backup-Source + type: string + - JSONPath: .spec.backupFilename + name: Backup-File + type: string + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + - JSONPath: .status.conditions[?(@.type=="Ready")].message + name: Status + type: string + group: resources.cattle.io + names: + kind: Restore + plural: restores + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + properties: + backupFilename: + type: string + deleteTimeoutSeconds: + maximum: 10 + type: integer + encryptionConfigSecretName: + type: string + prune: + nullable: true + type: boolean + storageLocation: + nullable: true + properties: + s3: + nullable: true + properties: 
+ bucketName: + type: string + credentialSecretName: + type: string + credentialSecretNamespace: + type: string + endpoint: + type: string + endpointCA: + type: string + folder: + type: string + insecureTLSSkipVerify: + type: boolean + region: + type: string + type: object + type: object + required: + - backupFilename + type: object + status: + properties: + backupSource: + type: string + conditions: + items: + properties: + lastTransitionTime: + type: string + lastUpdateTime: + type: string + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + type: object + nullable: true + type: array + observedGeneration: + type: integer + restoreCompletionTs: + type: string + summary: + type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true diff --git a/charts/rancher-backup/rancher-backup/1.0.400/Chart.yaml b/charts/rancher-backup/rancher-backup/1.0.400/Chart.yaml new file mode 100755 index 000000000..0cc5ada17 --- /dev/null +++ b/charts/rancher-backup/rancher-backup/1.0.400/Chart.yaml @@ -0,0 +1,20 @@ +annotations: + catalog.cattle.io/auto-install: rancher-backup-crd=match + catalog.cattle.io/certified: rancher + catalog.cattle.io/display-name: Rancher Backups + catalog.cattle.io/namespace: cattle-resources-system + catalog.cattle.io/os: linux + catalog.cattle.io/provides-gvr: resources.cattle.io.resourceset/v1 + catalog.cattle.io/release-name: rancher-backup + catalog.cattle.io/scope: management + catalog.cattle.io/ui-component: rancher-backup +apiVersion: v2 +appVersion: 1.0.4 +description: Provides ability to back up and restore the Rancher application running + on any Kubernetes cluster +icon: https://charts.rancher.io/assets/logos/backup-restore.svg +keywords: +- applications +- infrastructure +name: rancher-backup +version: 1.0.400 diff --git a/charts/rancher-backup/rancher-backup/1.0.400/README.md b/charts/rancher-backup/rancher-backup/1.0.400/README.md new file 
mode 100755 index 000000000..00fc96d92 --- /dev/null +++ b/charts/rancher-backup/rancher-backup/1.0.400/README.md @@ -0,0 +1,69 @@ +# Rancher Backup + +This chart provides ability to back up and restore the Rancher application running on any Kubernetes cluster. + +Refer [this](https://github.com/rancher/backup-restore-operator) repository for implementation details. + +----- + +### Get Repo Info +```bash +helm repo add rancher-chart https://charts.rancher.io +helm repo update +``` + +----- + +### Install Chart +```bash +helm install rancher-backup-crd rancher-chart/rancher-backup-crd -n cattle-resources-system --create-namespace +helm install rancher-backup rancher-chart/rancher-backup -n cattle-resources-system +``` + +----- + +### Configuration +The following table lists the configurable parameters of the rancher-backup chart and their default values: + +| Parameter | Description | Default | +|----------|---------------|-------| +| image.repository | Container image repository | rancher/backup-restore-operator | +| image.tag | Container image tag | v0.1.0-rc1 | +| s3.enabled | Configure S3 compatible default storage location. Current version supports S3 and MinIO | false | +| s3.credentialSecretName | Name of the Secret containing S3 credentials. This is an optional field. Skip this field in order to use IAM Role authentication. The Secret must contain following two keys, `accessKey` and `secretKey` | "" | +| s3.credentialSecretNamespace | Namespace of the Secret containing S3 credentials. This can be any namespace. 
| "" | +| s3.region | Region of the S3 Bucket (Required for S3, not valid for MinIO) | "" | +| s3.bucketName | Name of the Bucket | "" | +| s3.folder | Base folder within the Bucket (optional) | "" | +| s3.endpoint | Endpoint for the S3 storage provider | "" | +| s3.endpointCA | Base64 encoded CA cert for the S3 storage provider (optional) | "" | +| s3.insecureTLSSkipVerify | Skip SSL verification | false | +| persistence.enabled | Configure a Persistent Volume as the default storage location. It accepts either a StorageClass name to create a PVC, or directly accepts the PV to use. The Persistent Volume is mounted at `/var/lib/backups` in the operator pod | false | +| persistence.storageClass | StorageClass to use for dynamically provisioning the Persistent Volume, which will be used for storing backups | "" | +| persistence.volumeName | Persistent Volume to use for storing backups | "" | +| persistence.size | Requested size of the Persistent Volume (Applicable when using dynamic provisioning) | "" | +| nodeSelector | https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector | {} | +| tolerations | https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration | [] | +| affinity | https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity | {} | + +----- + +### CRDs + +Refer [this](https://github.com/rancher/backup-restore-operator#crds) section for information on CRDs that this chart installs. Also refer [this](https://github.com/rancher/backup-restore-operator/tree/master/examples) folder containing sample manifests for the CRDs. 
+ +----- +### Upgrading Chart +```bash +helm upgrade rancher-backup-crd -n cattle-resources-system +helm upgrade rancher-backup -n cattle-resources-system +``` + +----- +### Uninstall Chart + +```bash +helm uninstall rancher-backup -n cattle-resources-system +helm uninstall rancher-backup-crd -n cattle-resources-system +``` + diff --git a/charts/rancher-backup/rancher-backup/1.0.400/app-readme.md b/charts/rancher-backup/rancher-backup/1.0.400/app-readme.md new file mode 100755 index 000000000..15a021cdb --- /dev/null +++ b/charts/rancher-backup/rancher-backup/1.0.400/app-readme.md @@ -0,0 +1,15 @@ +# Rancher Backup + +This chart enables ability to capture backups of the Rancher application and restore from these backups. This chart can be used to migrate Rancher from one Kubernetes cluster to a different Kubernetes cluster. + +For more information on how to use the feature, refer to our [docs](https://rancher.com/docs/rancher/v2.x/en/backups/v2.5/). + +This chart installs the following components: + +- [backup-restore-operator](https://github.com/rancher/backup-restore-operator) + - The operator handles backing up all Kubernetes resources and CRDs that Rancher creates and manages from the local cluster. It gathers these resources by querying the Kubernetes API server, packages all the resources to create a tarball file and saves it in the configured backup storage location. + - The operator can be configured to store backups in S3-compatible object stores such as AWS S3 and MinIO, and in persistent volumes. During deployment, you can create a default storage location, but there is always the option to override the default storage location with each backup, but will be limited to using an S3-compatible object store. + - It preserves the ownerReferences on all resources, hence maintaining dependencies between objects. + - This operator provides encryption support, to encrypt user specified resources before saving them in the backup file. 
It uses the same encryption configuration that is used to enable [Kubernetes Encryption at Rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/). +- Backup - A backup is a CRD (`Backup`) that defines when to take backups, where to store the backup and what encryption to use (optional). Backups can be taken ad hoc or scheduled to be taken in intervals. +- Restore - A restore is a CRD (`Restore`) that defines which backup to use to restore the Rancher application to. diff --git a/charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/eks.yaml b/charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/eks.yaml new file mode 100755 index 000000000..59f47ce47 --- /dev/null +++ b/charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/eks.yaml @@ -0,0 +1,17 @@ +- apiVersion: "eks.cattle.io/v1" + kindsRegexp: "." +- apiVersion: "apps/v1" + kindsRegexp: "^deployments$" + resourceNames: + - "eks-config-operator" +- apiVersion: "apiextensions.k8s.io/v1beta1" + kindsRegexp: "." 
+ resourceNameRegexp: "eks.cattle.io$" +- apiVersion: "rbac.authorization.k8s.io/v1" + kindsRegexp: "^clusterroles$" + resourceNames: + - "eks-operator" +- apiVersion: "rbac.authorization.k8s.io/v1" + kindsRegexp: "^clusterrolebindings$" + resourceNames: + - "eks-operator" diff --git a/charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/fleet.yaml b/charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/fleet.yaml new file mode 100755 index 000000000..140a11978 --- /dev/null +++ b/charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/fleet.yaml @@ -0,0 +1,49 @@ +- apiVersion: "v1" + kindsRegexp: "^namespaces$" + resourceNameRegexp: "^fleet-|^cluster-fleet-" +- apiVersion: "v1" + kindsRegexp: "^secrets$" + namespaceRegexp: "^fleet-|^cluster-fleet-" + labelSelectors: + matchExpressions: + - key: "owner" + operator: "NotIn" + values: ["helm"] + - key: "fleet.cattle.io/managed" + operator: "In" + values: ["true"] +- apiVersion: "v1" + kindsRegexp: "^serviceaccounts$" + namespaceRegexp: "^fleet-|^cluster-fleet-" +- apiVersion: "v1" + kindsRegexp: "^configmaps$" + namespaceRegexp: "^fleet-|^cluster-fleet-" +- apiVersion: "rbac.authorization.k8s.io/v1" + kindsRegexp: "^roles$|^rolebindings$" + namespaceRegexp: "^fleet-|^cluster-fleet-" +- apiVersion: "rbac.authorization.k8s.io/v1" + kindsRegexp: "^clusterrolebindings$" + resourceNameRegexp: "^fleet-|^gitjob-" +- apiVersion: "rbac.authorization.k8s.io/v1" + kindsRegexp: "^clusterroles$" + resourceNameRegexp: "^fleet-" + resourceNames: + - "gitjob" +- apiVersion: "apiextensions.k8s.io/v1beta1" + kindsRegexp: "." + resourceNameRegexp: "fleet.cattle.io$|gitjob.cattle.io$" +- apiVersion: "fleet.cattle.io/v1alpha1" + kindsRegexp: "." +- apiVersion: "gitjob.cattle.io/v1" + kindsRegexp: "." 
+- apiVersion: "apps/v1" + kindsRegexp: "^deployments$" + namespaceRegexp: "^fleet-|^cluster-fleet-" + resourceNameRegexp: "^fleet-" + resourceNames: + - "gitjob" +- apiVersion: "apps/v1" + kindsRegexp: "^services$" + namespaceRegexp: "^fleet-|^cluster-fleet-" + resourceNames: + - "gitjob" diff --git a/charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/gke.yaml b/charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/gke.yaml new file mode 100755 index 000000000..a77019235 --- /dev/null +++ b/charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/gke.yaml @@ -0,0 +1,17 @@ +- apiVersion: "apiextensions.k8s.io/v1beta1" + kindsRegexp: "." + resourceNameRegexp: "gke.cattle.io$" +- apiVersion: "gke.cattle.io/v1" + kindsRegexp: "." +- apiVersion: "apps/v1" + kindsRegexp: "^deployments$" + resourceNames: + - "gke-config-operator" +- apiVersion: "rbac.authorization.k8s.io/v1" + kindsRegexp: "^clusterroles$" + resourceNames: + - "gke-operator" +- apiVersion: "rbac.authorization.k8s.io/v1" + kindsRegexp: "^clusterrolebindings$" + resourceNames: + - "gke-operator" diff --git a/charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/rancher-operator.yaml b/charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/rancher-operator.yaml new file mode 100755 index 000000000..3518fb5b7 --- /dev/null +++ b/charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/rancher-operator.yaml @@ -0,0 +1,27 @@ +- apiVersion: "rancher.cattle.io/v1" + kindsRegexp: "." 
+- apiVersion: "apps/v1" + kindsRegexp: "^deployments$" + resourceNames: + - "rancher-operator" + namespaces: + - "rancher-operator-system" +- apiVersion: "v1" + kindsRegexp: "^serviceaccounts$" + namespaces: + - "rancher-operator-system" +- apiVersion: "rbac.authorization.k8s.io/v1" + kindsRegexp: "^clusterrolebindings$" + resourceNames: + - "rancher-operator" +- apiVersion: "rbac.authorization.k8s.io/v1" + kindsRegexp: "^clusterroles$" + resourceNames: + - "rancher-operator" +- apiVersion: "apiextensions.k8s.io/v1beta1" + kindsRegexp: "." + resourceNameRegexp: "rancher.cattle.io$" +- apiVersion: "v1" + kindsRegexp: "^namespaces$" + resourceNames: + - "rancher-operator-system" diff --git a/charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/rancher.yaml b/charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/rancher.yaml new file mode 100755 index 000000000..14d65a8d6 --- /dev/null +++ b/charts/rancher-backup/rancher-backup/1.0.400/files/default-resourceset-contents/rancher.yaml @@ -0,0 +1,44 @@ +- apiVersion: "v1" + kindsRegexp: "^namespaces$" + resourceNameRegexp: "^cattle-|^p-|^c-|^user-|^u-" + resourceNames: + - "local" +- apiVersion: "apps/v1" + kindsRegexp: "^deployments$" + namespaces: + - "cattle-system" +- apiVersion: "v1" + kindsRegexp: "^secrets$" + namespaceRegexp: "^cattle-|^p-|^c-|^local$|^user-|^u-" + labelSelectors: + matchExpressions: + - key: "owner" + operator: "NotIn" + values: ["helm"] +- apiVersion: "v1" + kindsRegexp: "^serviceaccounts$" + namespaceRegexp: "^cattle-|^p-|^c-|^local$|^user-|^u-" +- apiVersion: "v1" + kindsRegexp: "^configmaps$" + namespaces: + - "cattle-system" +- apiVersion: "rbac.authorization.k8s.io/v1" + kindsRegexp: "^roles$|^rolebindings$" + namespaceRegexp: "^cattle-|^p-|^c-|^local$|^user-|^u-" +- apiVersion: "rbac.authorization.k8s.io/v1" + kindsRegexp: "^clusterrolebindings$" + resourceNameRegexp: "^cattle-|^clusterrolebinding-|^globaladmin-user-|^grb-u-|^crb-" +- 
apiVersion: "rbac.authorization.k8s.io/v1" + kindsRegexp: "^clusterroles$" + resourceNameRegexp: "^cattle-|^p-|^c-|^local-|^user-|^u-|^project-|^create-ns$" +- apiVersion: "apiextensions.k8s.io/v1beta1" + kindsRegexp: "." + resourceNameRegexp: "management.cattle.io$|project.cattle.io$|catalog.cattle.io$|resources.cattle.io$" +- apiVersion: "management.cattle.io/v3" + kindsRegexp: "." +- apiVersion: "project.cattle.io/v3" + kindsRegexp: "." +- apiVersion: "catalog.cattle.io/v1" + kindsRegexp: "^clusterrepos$" +- apiVersion: "resources.cattle.io/v1" + kindsRegexp: "^ResourceSet$" diff --git a/charts/rancher-backup/rancher-backup/1.0.400/templates/_helpers.tpl b/charts/rancher-backup/rancher-backup/1.0.400/templates/_helpers.tpl new file mode 100755 index 000000000..411cfc63a --- /dev/null +++ b/charts/rancher-backup/rancher-backup/1.0.400/templates/_helpers.tpl @@ -0,0 +1,76 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "backupRestore.fullname" -}} +{{- .Chart.Name | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "backupRestore.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "backupRestore.labels" -}} +helm.sh/chart: {{ include "backupRestore.chart" . }} +{{ include "backupRestore.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "backupRestore.selectorLabels" -}} +app.kubernetes.io/name: {{ include "backupRestore.fullname" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +resources.cattle.io/operator: backup-restore +{{- end }} + + +{{/* +Create the name of the service account to use +*/}} +{{- define "backupRestore.serviceAccountName" -}} +{{ include "backupRestore.fullname" . }} +{{- end }} + + +{{- define "backupRestore.s3SecretName" -}} +{{- printf "%s-%s" .Chart.Name "s3" | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create PVC name using release and revision number. +*/}} +{{- define "backupRestore.pvcName" -}} +{{- printf "%s-%d" .Release.Name .Release.Revision }} +{{- end }} + diff --git a/charts/rancher-backup/rancher-backup/1.0.400/templates/clusterrolebinding.yaml b/charts/rancher-backup/rancher-backup/1.0.400/templates/clusterrolebinding.yaml new file mode 100755 index 000000000..cf4abf670 --- /dev/null +++ b/charts/rancher-backup/rancher-backup/1.0.400/templates/clusterrolebinding.yaml @@ -0,0 +1,14 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "backupRestore.fullname" . }} + labels: + {{- include "backupRestore.labels" . | nindent 4 }} +subjects: +- kind: ServiceAccount + name: {{ include "backupRestore.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io diff --git a/charts/rancher-backup/rancher-backup/1.0.400/templates/deployment.yaml b/charts/rancher-backup/rancher-backup/1.0.400/templates/deployment.yaml new file mode 100755 index 000000000..776351ae5 --- /dev/null +++ b/charts/rancher-backup/rancher-backup/1.0.400/templates/deployment.yaml @@ -0,0 +1,59 @@ +{{- if and .Values.s3.enabled .Values.persistence.enabled }} +{{- fail "\n\nCannot configure both s3 and PV for storing backups" }} +{{- end }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "backupRestore.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "backupRestore.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "backupRestore.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + {{- include "backupRestore.selectorLabels" . | nindent 8 }} + annotations: + checksum/s3: {{ include (print $.Template.BasePath "/s3-secret.yaml") . | sha256sum }} + checksum/pvc: {{ include (print $.Template.BasePath "/pvc.yaml") . | sha256sum }} + spec: + serviceAccountName: {{ include "backupRestore.serviceAccountName" . }} + containers: + - name: {{ .Chart.Name }} + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: Always + env: + - name: CHART_NAMESPACE + value: {{ .Release.Namespace }} + {{- if .Values.s3.enabled }} + - name: DEFAULT_S3_BACKUP_STORAGE_LOCATION + value: {{ include "backupRestore.s3SecretName" . }} + {{- end }} + {{- if .Values.persistence.enabled }} + - name: DEFAULT_PERSISTENCE_ENABLED + value: "persistence-enabled" + volumeMounts: + - mountPath: "/var/lib/backups" + name: pv-storage + volumes: + - name: pv-storage + persistentVolumeClaim: + claimName: {{ include "backupRestore.pvcName" . 
}} + {{- end }} + nodeSelector: + kubernetes.io/os: linux + {{- with .Values.nodeSelector }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + tolerations: + {{- include "linux-node-tolerations" . | nindent 8}} + {{- with .Values.tolerations }} + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/charts/rancher-backup/rancher-backup/1.0.400/templates/pvc.yaml b/charts/rancher-backup/rancher-backup/1.0.400/templates/pvc.yaml new file mode 100755 index 000000000..ff57e4dab --- /dev/null +++ b/charts/rancher-backup/rancher-backup/1.0.400/templates/pvc.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.persistence.enabled -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "backupRestore.pvcName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "backupRestore.labels" . | nindent 4 }} +spec: + accessModes: + - ReadWriteOnce + resources: + {{- with .Values.persistence }} + requests: + storage: {{ .size | quote }} +{{- if .storageClass }} +{{- if (eq "-" .storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: {{ .storageClass | quote }} +{{- end }} +{{- end }} +{{- if .volumeName }} + volumeName: {{ .volumeName | quote }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/rancher-backup/rancher-backup/1.0.400/templates/rancher-resourceset.yaml b/charts/rancher-backup/rancher-backup/1.0.400/templates/rancher-resourceset.yaml new file mode 100755 index 000000000..05add8824 --- /dev/null +++ b/charts/rancher-backup/rancher-backup/1.0.400/templates/rancher-resourceset.yaml @@ -0,0 +1,13 @@ +apiVersion: resources.cattle.io/v1 +kind: ResourceSet +metadata: + name: rancher-resource-set +controllerReferences: + - apiVersion: "apps/v1" + resource: "deployments" + name: "rancher" + namespace: "cattle-system" +resourceSelectors: +{{- range $path, $_ := .Files.Glob "files/default-resourceset-contents/*.yaml" -}} + {{- $.Files.Get $path | 
nindent 2 -}} +{{- end -}} diff --git a/charts/rancher-backup/rancher-backup/1.0.400/templates/s3-secret.yaml b/charts/rancher-backup/rancher-backup/1.0.400/templates/s3-secret.yaml new file mode 100755 index 000000000..a07623d90 --- /dev/null +++ b/charts/rancher-backup/rancher-backup/1.0.400/templates/s3-secret.yaml @@ -0,0 +1,31 @@ +{{- if .Values.s3.enabled -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "backupRestore.s3SecretName" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "backupRestore.labels" . | nindent 4 }} +type: Opaque +stringData: + {{- with .Values.s3 }} + {{- if .credentialSecretName }} + credentialSecretName: {{ .credentialSecretName }} + credentialSecretNamespace: {{ required "When providing a Secret containing S3 credentials, a valid .Values.credentialSecretNamespace must be provided" .credentialSecretNamespace }} + {{- end }} + {{- if .region }} + region: {{ .region }} + {{- end }} + bucketName: {{ required "A valid .Values.bucketName is required for configuring S3 compatible storage as the default backup storage location" .bucketName }} + {{- if .folder }} + folder: {{ .folder }} + {{- end }} + endpoint: {{ required "A valid .Values.endpoint is required for configuring S3 compatible storage as the default backup storage location" .endpoint }} + {{- if .endpointCA }} + endpointCA: {{ .endpointCA }} + {{- end }} + {{- if .insecureTLSSkipVerify }} + insecureTLSSkipVerify: {{ .insecureTLSSkipVerify | quote }} + {{- end }} + {{- end }} +{{ end }} diff --git a/charts/rancher-backup/rancher-backup/1.0.400/templates/serviceaccount.yaml b/charts/rancher-backup/rancher-backup/1.0.400/templates/serviceaccount.yaml new file mode 100755 index 000000000..f333b746c --- /dev/null +++ b/charts/rancher-backup/rancher-backup/1.0.400/templates/serviceaccount.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "backupRestore.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "backupRestore.labels" . | nindent 4 }} diff --git a/charts/rancher-backup/rancher-backup/1.0.400/templates/validate-install-crd.yaml b/charts/rancher-backup/rancher-backup/1.0.400/templates/validate-install-crd.yaml new file mode 100755 index 000000000..f63fd2e2e --- /dev/null +++ b/charts/rancher-backup/rancher-backup/1.0.400/templates/validate-install-crd.yaml @@ -0,0 +1,16 @@ +#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}} +# {{- $found := dict -}} +# {{- set $found "resources.cattle.io/v1/Backup" false -}} +# {{- set $found "resources.cattle.io/v1/ResourceSet" false -}} +# {{- set $found "resources.cattle.io/v1/Restore" false -}} +# {{- range .Capabilities.APIVersions -}} +# {{- if hasKey $found (toString .) -}} +# {{- set $found (toString .) true -}} +# {{- end -}} +# {{- end -}} +# {{- range $_, $exists := $found -}} +# {{- if (eq $exists false) -}} +# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}} +# {{- end -}} +# {{- end -}} +#{{- end -}} \ No newline at end of file diff --git a/charts/rancher-backup/rancher-backup/1.0.400/values.yaml b/charts/rancher-backup/rancher-backup/1.0.400/values.yaml new file mode 100755 index 000000000..69ed2e011 --- /dev/null +++ b/charts/rancher-backup/rancher-backup/1.0.400/values.yaml @@ -0,0 +1,49 @@ +image: + repository: rancher/backup-restore-operator + tag: v1.0.4-rc4 + +## Default s3 bucket for storing all backup files created by the backup-restore-operator +s3: + enabled: false + ## credentialSecretName if set, should be the name of the Secret containing AWS credentials. 
+ ## To use IAM Role, don't set this field + credentialSecretName: "" + credentialSecretNamespace: "" + region: "" + bucketName: "" + folder: "" + endpoint: "" + endpointCA: "" + insecureTLSSkipVerify: false + +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## If persistence is enabled, operator will create a PVC with mountPath /var/lib/backups +persistence: + enabled: false + + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack). + ## Refer https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1 + ## + storageClass: "-" + + ## If you want to disable dynamic provisioning by setting storageClass to "-" above, + ## and want to target a particular PV, provide name of the target volume + volumeName: "" + + ## Only certain StorageClasses allow resizing PVs; Refer https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/ + size: 2Gi + + +global: + cattle: + systemDefaultRegistry: "" + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/Chart.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/Chart.yaml new file mode 100755 index 000000000..5d62a6b99 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/Chart.yaml @@ -0,0 +1,10 @@ +annotations: + catalog.cattle.io/certified: rancher + catalog.cattle.io/hidden: "true" + catalog.cattle.io/namespace: cis-operator-system + catalog.cattle.io/release-name: rancher-cis-benchmark-crd +apiVersion: v1 +description: Installs the CRDs for rancher-cis-benchmark. 
+name: rancher-cis-benchmark-crd +type: application +version: 1.0.400 diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/README.md b/charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/README.md new file mode 100755 index 000000000..f6d9ef621 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/README.md @@ -0,0 +1,2 @@ +# rancher-cis-benchmark-crd +A Rancher chart that installs the CRDs used by rancher-cis-benchmark. diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/templates/clusterscan.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/templates/clusterscan.yaml new file mode 100755 index 000000000..beca6e1f8 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/templates/clusterscan.yaml @@ -0,0 +1,149 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterscans.cis.cattle.io +spec: + additionalPrinterColumns: + - JSONPath: .status.lastRunScanProfileName + name: ClusterScanProfile + type: string + - JSONPath: .status.summary.total + name: Total + type: string + - JSONPath: .status.summary.pass + name: Pass + type: string + - JSONPath: .status.summary.fail + name: Fail + type: string + - JSONPath: .status.summary.skip + name: Skip + type: string + - JSONPath: .status.summary.warn + name: Warn + type: string + - JSONPath: .status.summary.notApplicable + name: Not Applicable + type: string + - JSONPath: .status.lastRunTimestamp + name: LastRunTimestamp + type: string + - JSONPath: .spec.scheduledScanConfig.cronSchedule + name: CronSchedule + type: string + group: cis.cattle.io + names: + kind: ClusterScan + plural: clusterscans + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + properties: + scanProfileName: + nullable: true + type: string + scheduledScanConfig: + nullable: true + properties: + cronSchedule: + nullable: true + 
type: string + retentionCount: + type: integer + scanAlertRule: + nullable: true + properties: + alertOnComplete: + type: boolean + alertOnFailure: + type: boolean + type: object + type: object + scoreWarning: + enum: + - pass + - fail + nullable: true + type: string + type: object + status: + properties: + NextScanAt: + nullable: true + type: string + ScanAlertingRuleName: + nullable: true + type: string + conditions: + items: + properties: + lastTransitionTime: + nullable: true + type: string + lastUpdateTime: + nullable: true + type: string + message: + nullable: true + type: string + reason: + nullable: true + type: string + status: + nullable: true + type: string + type: + nullable: true + type: string + type: object + nullable: true + type: array + display: + nullable: true + properties: + error: + type: boolean + message: + nullable: true + type: string + state: + nullable: true + type: string + transitioning: + type: boolean + type: object + lastRunScanProfileName: + nullable: true + type: string + lastRunTimestamp: + nullable: true + type: string + observedGeneration: + type: integer + summary: + nullable: true + properties: + fail: + type: integer + notApplicable: + type: integer + pass: + type: integer + skip: + type: integer + total: + type: integer + warn: + type: integer + type: object + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/templates/clusterscanbenchmark.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/templates/clusterscanbenchmark.yaml new file mode 100755 index 000000000..aa6fc2218 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/templates/clusterscanbenchmark.yaml @@ -0,0 +1,55 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterscanbenchmarks.cis.cattle.io +spec: + additionalPrinterColumns: + - JSONPath: 
.spec.clusterProvider + name: ClusterProvider + type: string + - JSONPath: .spec.minKubernetesVersion + name: MinKubernetesVersion + type: string + - JSONPath: .spec.maxKubernetesVersion + name: MaxKubernetesVersion + type: string + - JSONPath: .spec.customBenchmarkConfigMapName + name: customBenchmarkConfigMapName + type: string + - JSONPath: .spec.customBenchmarkConfigMapNamespace + name: customBenchmarkConfigMapNamespace + type: string + group: cis.cattle.io + names: + kind: ClusterScanBenchmark + plural: clusterscanbenchmarks + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + properties: + clusterProvider: + nullable: true + type: string + customBenchmarkConfigMapName: + nullable: true + type: string + customBenchmarkConfigMapNamespace: + nullable: true + type: string + maxKubernetesVersion: + nullable: true + type: string + minKubernetesVersion: + nullable: true + type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/templates/clusterscanprofile.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/templates/clusterscanprofile.yaml new file mode 100755 index 000000000..21bb68396 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/templates/clusterscanprofile.yaml @@ -0,0 +1,37 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterscanprofiles.cis.cattle.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.benchmarkVersion + name: BenchmarkVersion + type: string + group: cis.cattle.io + names: + kind: ClusterScanProfile + plural: clusterscanprofiles + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + properties: + benchmarkVersion: + nullable: true + type: string + skipTests: + items: + nullable: true + type: string + nullable: 
true + type: array + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/templates/clusterscanreport.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/templates/clusterscanreport.yaml new file mode 100755 index 000000000..017020a95 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark-crd/1.0.400/templates/clusterscanreport.yaml @@ -0,0 +1,40 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterscanreports.cis.cattle.io +spec: + additionalPrinterColumns: + - JSONPath: .spec.lastRunTimestamp + name: LastRunTimestamp + type: string + - JSONPath: .spec.benchmarkVersion + name: BenchmarkVersion + type: string + group: cis.cattle.io + names: + kind: ClusterScanReport + plural: clusterscanreports + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + spec: + properties: + benchmarkVersion: + nullable: true + type: string + lastRunTimestamp: + nullable: true + type: string + reportJSON: + nullable: true + type: string + type: object + type: object + version: v1 + versions: + - name: v1 + served: true + storage: true diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/Chart.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/Chart.yaml new file mode 100755 index 000000000..2ca42fb2b --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/Chart.yaml @@ -0,0 +1,18 @@ +annotations: + catalog.cattle.io/auto-install: rancher-cis-benchmark-crd=match + catalog.cattle.io/certified: rancher + catalog.cattle.io/display-name: CIS Benchmark + catalog.cattle.io/namespace: cis-operator-system + catalog.cattle.io/os: linux + catalog.cattle.io/provides-gvr: cis.cattle.io.clusterscans/v1 + catalog.cattle.io/release-name: rancher-cis-benchmark + catalog.cattle.io/ui-component: 
rancher-cis-benchmark +apiVersion: v1 +appVersion: v1.0.4 +description: The cis-operator enables running CIS benchmark security scans on a kubernetes + cluster +icon: https://charts.rancher.io/assets/logos/cis-kube-bench.svg +keywords: +- security +name: rancher-cis-benchmark +version: 1.0.400 diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/README.md b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/README.md new file mode 100755 index 000000000..50beab58b --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/README.md @@ -0,0 +1,9 @@ +# Rancher CIS Benchmark Chart + +The cis-operator enables running CIS benchmark security scans on a kubernetes cluster and generate compliance reports that can be downloaded. + +# Installation + +``` +helm install rancher-cis-benchmark ./ --create-namespace -n cis-operator-system +``` diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/app-readme.md b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/app-readme.md new file mode 100755 index 000000000..5e495d605 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/app-readme.md @@ -0,0 +1,15 @@ +# Rancher CIS Benchmarks + +This chart enables security scanning of the cluster using [CIS (Center for Internet Security) benchmarks](https://www.cisecurity.org/benchmark/kubernetes/). + +For more information on how to use the feature, refer to our [docs](https://rancher.com/docs/rancher/v2.x/en/cis-scans/v2.5/). + +This chart installs the following components: + +- [cis-operator](https://github.com/rancher/cis-operator) - The cis-operator handles launching the [kube-bench](https://github.com/aquasecurity/kube-bench) tool that runs a suite of CIS tests on the nodes of your Kubernetes cluster. After scans finish, the cis-operator generates a compliance report that can be downloaded. 
+- Scans - A scan is a CRD (`ClusterScan`) that defines when to trigger CIS scans on the cluster based on the defined profile. A report is created after the scan is completed. +- Profiles - A profile is a CRD (`ClusterScanProfile`) that defines the configuration for the CIS scan, which is the benchmark versions to use and any specific tests to skip in that benchmark. This chart installs a few default `ClusterScanProfile` custom resources with no skipped tests, which can immediately be used to launch CIS scans. +- Benchmark Versions - A benchmark version is a CRD (`ClusterScanBenchmark`) that defines the CIS benchmark version to run using kube-bench as well as the valid configuration parameters for that benchmark. This chart installs a few default `ClusterScanBenchmark` custom resources. +- Alerting Resources - Rancher's CIS Benchmark application lets you run a cluster scan on a schedule, and send alerts when scans finish. + - If you want to enable alerts to be delivered when a cluster scan completes, you need to ensure that [Rancher's Monitoring and Alerting](https://rancher.com/docs/rancher/v2.x/en/monitoring-alerting/v2.5/) application is pre-installed and the [Receivers and Routes](https://rancher.com/docs/rancher/v2.x/en/monitoring-alerting/v2.5/configuration/#alertmanager-config) are configured to send out alerts. + - Additionally, you need to set `alerts: true` in the Values YAML while installing or upgrading this chart. 
diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/_helpers.tpl b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/_helpers.tpl new file mode 100755 index 000000000..67f4ce116 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/_helpers.tpl @@ -0,0 +1,23 @@ +{{/* Ensure namespace is set the same everywhere */}} +{{- define "cis.namespace" -}} + {{- .Release.Namespace | default "cis-operator-system" -}} +{{- end -}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} +{{- define "linux_node_tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/alertingrule.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/alertingrule.yaml new file mode 100755 index 000000000..1787c88a0 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/alertingrule.yaml @@ -0,0 +1,14 @@ +{{- if .Values.alerts.enabled -}} +--- +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: rancher-cis-pod-monitor + namespace: {{ template "cis.namespace" . 
}} +spec: + selector: + matchLabels: + cis.cattle.io/operator: cis-operator + podMetricsEndpoints: + - port: cismetrics +{{- end }} diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-cis-1.5.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-cis-1.5.yaml new file mode 100755 index 000000000..39e8b834a --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-cis-1.5.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanBenchmark +metadata: + name: cis-1.5 +spec: + clusterProvider: "" + minKubernetesVersion: "1.15.0" diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-cis-1.6.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-cis-1.6.yaml new file mode 100755 index 000000000..93ba064f4 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-cis-1.6.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanBenchmark +metadata: + name: cis-1.6 +spec: + clusterProvider: "" + minKubernetesVersion: "1.16.0" diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-eks-1.0.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-eks-1.0.yaml new file mode 100755 index 000000000..bd2e32cd3 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-eks-1.0.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanBenchmark +metadata: + name: eks-1.0 +spec: + clusterProvider: eks + minKubernetesVersion: "1.15.0" diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-gke-1.0.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-gke-1.0.yaml new file mode 100755 index 000000000..72122e8c5 --- /dev/null +++ 
b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-gke-1.0.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanBenchmark +metadata: + name: gke-1.0 +spec: + clusterProvider: gke + minKubernetesVersion: "1.15.0" diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-k3s-cis-1.6-hardened.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-k3s-cis-1.6-hardened.yaml new file mode 100755 index 000000000..3ca9b6009 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-k3s-cis-1.6-hardened.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanBenchmark +metadata: + name: k3s-cis-1.6-hardened +spec: + clusterProvider: k3s + minKubernetesVersion: "1.20.5" diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-k3s-cis-1.6-permissive.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-k3s-cis-1.6-permissive.yaml new file mode 100755 index 000000000..6d4253c6e --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-k3s-cis-1.6-permissive.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanBenchmark +metadata: + name: k3s-cis-1.6-permissive +spec: + clusterProvider: k3s + minKubernetesVersion: "1.20.5" diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke-cis-1.5-hardened.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke-cis-1.5-hardened.yaml new file mode 100755 index 000000000..b5627f966 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke-cis-1.5-hardened.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanBenchmark +metadata: + name: rke-cis-1.5-hardened +spec: + clusterProvider: rke + 
minKubernetesVersion: "1.15.0" diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke-cis-1.5-permissive.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke-cis-1.5-permissive.yaml new file mode 100755 index 000000000..95f80c0f0 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke-cis-1.5-permissive.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanBenchmark +metadata: + name: rke-cis-1.5-permissive +spec: + clusterProvider: rke + minKubernetesVersion: "1.15.0" diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke-cis-1.6-hardened.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke-cis-1.6-hardened.yaml new file mode 100755 index 000000000..d75de8154 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke-cis-1.6-hardened.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanBenchmark +metadata: + name: rke-cis-1.6-hardened +spec: + clusterProvider: rke + minKubernetesVersion: "1.16.0" diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke-cis-1.6-permissive.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke-cis-1.6-permissive.yaml new file mode 100755 index 000000000..52428f4a7 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke-cis-1.6-permissive.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanBenchmark +metadata: + name: rke-cis-1.6-permissive +spec: + clusterProvider: rke + minKubernetesVersion: "1.16.0" diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke2-cis-1.5-hardened.yaml 
b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke2-cis-1.5-hardened.yaml new file mode 100755 index 000000000..3d83e9bd8 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke2-cis-1.5-hardened.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanBenchmark +metadata: + name: rke2-cis-1.5-hardened +spec: + clusterProvider: rke2 + minKubernetesVersion: "1.18.0" diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke2-cis-1.5-permissive.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke2-cis-1.5-permissive.yaml new file mode 100755 index 000000000..f66aa8f6e --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke2-cis-1.5-permissive.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanBenchmark +metadata: + name: rke2-cis-1.5-permissive +spec: + clusterProvider: rke2 + minKubernetesVersion: "1.18.0" diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke2-cis-1.6-hardened.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke2-cis-1.6-hardened.yaml new file mode 100755 index 000000000..3593bf371 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke2-cis-1.6-hardened.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanBenchmark +metadata: + name: rke2-cis-1.6-hardened +spec: + clusterProvider: rke2 + minKubernetesVersion: "1.20.5" diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke2-cis-1.6-permissive.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke2-cis-1.6-permissive.yaml new file mode 100755 index 000000000..522f846ae --- /dev/null +++ 
b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/benchmark-rke2-cis-1.6-permissive.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanBenchmark +metadata: + name: rke2-cis-1.6-permissive +spec: + clusterProvider: rke2 + minKubernetesVersion: "1.20.5" diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/cis-roles.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/cis-roles.yaml new file mode 100755 index 000000000..23c93dc65 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/cis-roles.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cis-admin +rules: + - apiGroups: + - cis.cattle.io + resources: + - clusterscanbenchmarks + - clusterscanprofiles + - clusterscans + - clusterscanreports + verbs: ["create", "update", "delete", "patch","get", "watch", "list"] + - apiGroups: + - catalog.cattle.io + resources: ["apps"] + resourceNames: ["rancher-cis-benchmark"] + verbs: ["get", "watch", "list"] + - apiGroups: + - "" + resources: + - configmaps + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cis-view +rules: + - apiGroups: + - cis.cattle.io + resources: + - clusterscanbenchmarks + - clusterscanprofiles + - clusterscans + - clusterscanreports + verbs: ["get", "watch", "list"] + - apiGroups: + - catalog.cattle.io + resources: ["apps"] + resourceNames: ["rancher-cis-benchmark"] + verbs: ["get", "watch", "list"] + - apiGroups: + - "" + resources: + - configmaps + verbs: ["get", "watch", "list"] diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/configmap.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/configmap.yaml new file mode 100755 index 000000000..6cbc23db4 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/configmap.yaml @@ -0,0 
+1,17 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: default-clusterscanprofiles + namespace: {{ template "cis.namespace" . }} +data: + # Default ClusterScanProfiles per cluster provider type + rke: |- + <1.16.0: rke-profile-permissive-1.5 + >=1.16.0: rke-profile-permissive-1.6 + rke2: |- + <1.20.5: rke2-cis-1.5-profile-permissive + >=1.20.5: rke2-cis-1.6-profile-permissive + eks: "eks-profile" + gke: "gke-profile" + k3s: "k3s-cis-1.6-profile-permissive" + default: "cis-1.6-profile" diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/deployment.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/deployment.yaml new file mode 100755 index 000000000..0d3c75e39 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/deployment.yaml @@ -0,0 +1,57 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cis-operator + namespace: {{ template "cis.namespace" . }} + labels: + cis.cattle.io/operator: cis-operator +spec: + selector: + matchLabels: + cis.cattle.io/operator: cis-operator + template: + metadata: + labels: + cis.cattle.io/operator: cis-operator + spec: + serviceAccountName: cis-operator-serviceaccount + containers: + - name: cis-operator + image: '{{ template "system_default_registry" . }}{{ .Values.image.cisoperator.repository }}:{{ .Values.image.cisoperator.tag }}' + imagePullPolicy: Always + ports: + - name: cismetrics + containerPort: {{ .Values.alerts.metricsPort }} + env: + - name: SECURITY_SCAN_IMAGE + value: {{ template "system_default_registry" . }}{{ .Values.image.securityScan.repository }} + - name: SECURITY_SCAN_IMAGE_TAG + value: {{ .Values.image.securityScan.tag }} + - name: SONOBUOY_IMAGE + value: {{ template "system_default_registry" . 
}}{{ .Values.image.sonobuoy.repository }} + - name: SONOBUOY_IMAGE_TAG + value: {{ .Values.image.sonobuoy.tag }} + - name: CIS_ALERTS_METRICS_PORT + value: '{{ .Values.alerts.metricsPort }}' + - name: CIS_ALERTS_SEVERITY + value: {{ .Values.alerts.severity }} + - name: CIS_ALERTS_ENABLED + value: {{ .Values.alerts.enabled | default "false" | quote }} + - name: CLUSTER_NAME + value: {{ .Values.global.cattle.clusterName }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + nodeSelector: + kubernetes.io/os: linux + {{- with .Values.nodeSelector }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + tolerations: + {{- include "linux_node_tolerations" . | nindent 8}} + {{- with .Values.tolerations }} + {{- toYaml . | nindent 8 }} + {{- end }} \ No newline at end of file diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/network_policy_allow_all.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/network_policy_allow_all.yaml new file mode 100755 index 000000000..6ed5d645e --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/network_policy_allow_all.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: default-allow-all + namespace: {{ template "cis.namespace" . 
}} +spec: + podSelector: {} + ingress: + - {} + egress: + - {} + policyTypes: + - Ingress + - Egress diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/patch_default_serviceaccount.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/patch_default_serviceaccount.yaml new file mode 100755 index 000000000..1efa3ed1c --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/patch_default_serviceaccount.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: patch-sa + annotations: + "helm.sh/hook": post-install, post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation +spec: + template: + spec: + serviceAccountName: cis-operator-serviceaccount + restartPolicy: Never + containers: + - name: sa + image: "{{ template "system_default_registry" . }}{{ .Values.global.kubectl.repository }}:{{ .Values.global.kubectl.tag }}" + imagePullPolicy: {{ .Values.global.imagePullPolicy }} + command: ["kubectl", "patch", "serviceaccount", "default", "-p", "{\"automountServiceAccountToken\": false}"] + args: ["-n", {{ template "cis.namespace" . 
}}] + backoffLimit: 1 diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/rbac.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/rbac.yaml new file mode 100755 index 000000000..816991f23 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/rbac.yaml @@ -0,0 +1,43 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: rancher-cis-benchmark + app.kubernetes.io/instance: release-name + name: cis-operator-role +rules: +- apiGroups: + - '*' + resources: + - '*' + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: rancher-cis-benchmark + app.kubernetes.io/instance: release-name + name: cis-operator-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cis-operator-role +subjects: +- kind: ServiceAccount + name: cis-serviceaccount + namespace: {{ template "cis.namespace" . }} +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: cis-operator-installer +subjects: +- kind: ServiceAccount + name: cis-operator-serviceaccount + namespace: {{ template "cis.namespace" . 
}} +roleRef: + kind: ClusterRole + name: cluster-admin + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-cis-1.5.yml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-cis-1.5.yml new file mode 100755 index 000000000..d69ae9dd5 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-cis-1.5.yml @@ -0,0 +1,9 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanProfile +metadata: + name: cis-1.5-profile + annotations: + clusterscanprofile.cis.cattle.io/builtin: "true" +spec: + benchmarkVersion: cis-1.5 diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-cis-1.6.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-cis-1.6.yaml new file mode 100755 index 000000000..8a8d8bf88 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-cis-1.6.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanProfile +metadata: + name: cis-1.6-profile + annotations: + clusterscanprofile.cis.cattle.io/builtin: "true" +spec: + benchmarkVersion: cis-1.6 diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-k3s-cis-1.6-hardened.yml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-k3s-cis-1.6-hardened.yml new file mode 100755 index 000000000..095e977ab --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-k3s-cis-1.6-hardened.yml @@ -0,0 +1,9 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanProfile +metadata: + name: k3s-cis-1.6-profile-hardened + annotations: + clusterscanprofile.cis.cattle.io/builtin: "true" +spec: + benchmarkVersion: k3s-cis-1.6-hardened diff --git 
a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-k3s-cis-1.6-permissive.yml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-k3s-cis-1.6-permissive.yml new file mode 100755 index 000000000..3b22a80c8 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-k3s-cis-1.6-permissive.yml @@ -0,0 +1,9 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanProfile +metadata: + name: k3s-cis-1.6-profile-permissive + annotations: + clusterscanprofile.cis.cattle.io/builtin: "true" +spec: + benchmarkVersion: k3s-cis-1.6-permissive diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke-1.5-hardened.yml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke-1.5-hardened.yml new file mode 100755 index 000000000..4eabe158a --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke-1.5-hardened.yml @@ -0,0 +1,9 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanProfile +metadata: + name: rke-profile-hardened-1.5 + annotations: + clusterscanprofile.cis.cattle.io/builtin: "true" +spec: + benchmarkVersion: rke-cis-1.5-hardened \ No newline at end of file diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke-1.5-permissive.yml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke-1.5-permissive.yml new file mode 100755 index 000000000..1f78751d1 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke-1.5-permissive.yml @@ -0,0 +1,9 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanProfile +metadata: + name: rke-profile-permissive-1.5 + annotations: + clusterscanprofile.cis.cattle.io/builtin: "true" +spec: + benchmarkVersion: rke-cis-1.5-permissive diff --git 
a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke-1.6-hardened.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke-1.6-hardened.yaml new file mode 100755 index 000000000..d38febd80 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke-1.6-hardened.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanProfile +metadata: + name: rke-profile-hardened-1.6 + annotations: + clusterscanprofile.cis.cattle.io/builtin: "true" +spec: + benchmarkVersion: rke-cis-1.6-hardened diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke-1.6-permissive.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke-1.6-permissive.yaml new file mode 100755 index 000000000..d31b5b0d2 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke-1.6-permissive.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanProfile +metadata: + name: rke-profile-permissive-1.6 + annotations: + clusterscanprofile.cis.cattle.io/builtin: "true" +spec: + benchmarkVersion: rke-cis-1.6-permissive diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke2-cis-1.5-hardened.yml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke2-cis-1.5-hardened.yml new file mode 100755 index 000000000..83eb3131e --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke2-cis-1.5-hardened.yml @@ -0,0 +1,9 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanProfile +metadata: + name: rke2-cis-1.5-profile-hardened + annotations: + clusterscanprofile.cis.cattle.io/builtin: "true" +spec: + benchmarkVersion: rke2-cis-1.5-hardened diff --git 
a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke2-cis-1.5-permissive.yml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke2-cis-1.5-permissive.yml new file mode 100755 index 000000000..40dc44bdf --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke2-cis-1.5-permissive.yml @@ -0,0 +1,9 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanProfile +metadata: + name: rke2-cis-1.5-profile-permissive + annotations: + clusterscanprofile.cis.cattle.io/builtin: "true" +spec: + benchmarkVersion: rke2-cis-1.5-permissive diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke2-cis-1.6-hardened.yml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke2-cis-1.6-hardened.yml new file mode 100755 index 000000000..c7ac7f949 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke2-cis-1.6-hardened.yml @@ -0,0 +1,9 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanProfile +metadata: + name: rke2-cis-1.6-profile-hardened + annotations: + clusterscanprofile.cis.cattle.io/builtin: "true" +spec: + benchmarkVersion: rke2-cis-1.6-hardened diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke2-cis-1.6-permissive.yml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke2-cis-1.6-permissive.yml new file mode 100755 index 000000000..96ca1345a --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofile-rke2-cis-1.6-permissive.yml @@ -0,0 +1,9 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanProfile +metadata: + name: rke2-cis-1.6-profile-permissive + annotations: + clusterscanprofile.cis.cattle.io/builtin: "true" +spec: + benchmarkVersion: rke2-cis-1.6-permissive diff --git 
a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofileeks.yml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofileeks.yml new file mode 100755 index 000000000..49c7e0246 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofileeks.yml @@ -0,0 +1,9 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanProfile +metadata: + name: eks-profile + annotations: + clusterscanprofile.cis.cattle.io/builtin: "true" +spec: + benchmarkVersion: eks-1.0 \ No newline at end of file diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofilegke.yml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofilegke.yml new file mode 100755 index 000000000..2ddd0686f --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/scanprofilegke.yml @@ -0,0 +1,9 @@ +--- +apiVersion: cis.cattle.io/v1 +kind: ClusterScanProfile +metadata: + name: gke-profile + annotations: + clusterscanprofile.cis.cattle.io/builtin: "true" +spec: + benchmarkVersion: gke-1.0 \ No newline at end of file diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/serviceaccount.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/serviceaccount.yaml new file mode 100755 index 000000000..ec48ec622 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: {{ template "cis.namespace" . }} + name: cis-operator-serviceaccount +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + namespace: {{ template "cis.namespace" . 
}} + labels: + app.kubernetes.io/name: rancher-cis-benchmark + app.kubernetes.io/instance: release-name + name: cis-serviceaccount diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/validate-install-crd.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/validate-install-crd.yaml new file mode 100755 index 000000000..562295791 --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/templates/validate-install-crd.yaml @@ -0,0 +1,17 @@ +#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}} +# {{- $found := dict -}} +# {{- set $found "cis.cattle.io/v1/ClusterScan" false -}} +# {{- set $found "cis.cattle.io/v1/ClusterScanBenchmark" false -}} +# {{- set $found "cis.cattle.io/v1/ClusterScanProfile" false -}} +# {{- set $found "cis.cattle.io/v1/ClusterScanReport" false -}} +# {{- range .Capabilities.APIVersions -}} +# {{- if hasKey $found (toString .) -}} +# {{- set $found (toString .) true -}} +# {{- end -}} +# {{- end -}} +# {{- range $_, $exists := $found -}} +# {{- if (eq $exists false) -}} +# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}} +# {{- end -}} +# {{- end -}} +#{{- end -}} \ No newline at end of file diff --git a/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/values.yaml b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/values.yaml new file mode 100755 index 000000000..05bcdac4f --- /dev/null +++ b/charts/rancher-cis-benchmark/rancher-cis-benchmark/1.0.400/values.yaml @@ -0,0 +1,45 @@ +# Default values for rancher-cis-benchmark. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +image: + cisoperator: + repository: rancher/cis-operator + tag: v1.0.4 + securityScan: + repository: rancher/security-scan + tag: v0.2.3 + sonobuoy: + repository: rancher/mirrored-sonobuoy-sonobuoy + tag: v0.16.3 + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +global: + cattle: + systemDefaultRegistry: "" + clusterName: "" + kubectl: + repository: rancher/kubectl + tag: v1.20.2 + +alerts: + enabled: false + severity: warning + metricsPort: 8080 diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/Chart.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/Chart.yaml new file mode 100755 index 000000000..00336402c --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/Chart.yaml @@ -0,0 +1,10 @@ +annotations: + catalog.cattle.io/certified: rancher + catalog.cattle.io/hidden: "true" + catalog.cattle.io/namespace: cattle-gatekeeper-system + catalog.cattle.io/release-name: rancher-gatekeeper-crd +apiVersion: v1 +description: Installs the CRDs for rancher-gatekeeper. +name: rancher-gatekeeper-crd +type: application +version: 3.3.001 diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/README.md b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/README.md new file mode 100755 index 000000000..26079c833 --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/README.md @@ -0,0 +1,2 @@ +# rancher-gatekeeper-crd +A Rancher chart that installs the CRDs used by rancher-gatekeeper. 
diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/crd-manifest/config-customresourcedefinition.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/crd-manifest/config-customresourcedefinition.yaml new file mode 100755 index 000000000..73ffbdc32 --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/crd-manifest/config-customresourcedefinition.yaml @@ -0,0 +1,106 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + gatekeeper.sh/system: "yes" + name: configs.config.gatekeeper.sh +spec: + group: config.gatekeeper.sh + names: + kind: Config + listKind: ConfigList + plural: configs + shortNames: + - config + singular: config + scope: Namespaced + validation: + openAPIV3Schema: + description: Config is the Schema for the configs API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConfigSpec defines the desired state of Config + properties: + match: + description: Configuration for namespace exclusion + items: + properties: + excludedNamespaces: + items: + type: string + type: array + processes: + items: + type: string + type: array + type: object + type: array + readiness: + description: Configuration for readiness tracker + properties: + statsEnabled: + type: boolean + type: object + sync: + description: Configuration for syncing k8s objects + properties: + syncOnly: + description: If non-empty, only entries on this list will be replicated into OPA + items: + properties: + group: + type: string + kind: + type: string + version: + type: string + type: object + type: array + type: object + validation: + description: Configuration for validation + properties: + traces: + description: List of requests to trace. Both "user" and "kinds" must be specified + items: + properties: + dump: + description: Also dump the state of OPA with the trace. Set to `All` to dump everything. 
+ type: string + kind: + description: Only trace requests of the following GroupVersionKind + properties: + group: + type: string + kind: + type: string + version: + type: string + type: object + user: + description: Only trace requests from the specified user + type: string + type: object + type: array + type: object + type: object + status: + description: ConfigStatus defines the observed state of Config + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/crd-manifest/constraintpodstatus-customresourcedefinition.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/crd-manifest/constraintpodstatus-customresourcedefinition.yaml new file mode 100755 index 000000000..f8e552080 --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/crd-manifest/constraintpodstatus-customresourcedefinition.yaml @@ -0,0 +1,68 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + gatekeeper.sh/system: "yes" + name: constraintpodstatuses.status.gatekeeper.sh +spec: + group: status.gatekeeper.sh + names: + kind: ConstraintPodStatus + listKind: ConstraintPodStatusList + plural: constraintpodstatuses + singular: constraintpodstatus + scope: Namespaced + validation: + openAPIV3Schema: + description: ConstraintPodStatus is the Schema for the constraintpodstatuses API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + status: + description: ConstraintPodStatusStatus defines the observed state of ConstraintPodStatus + properties: + constraintUID: + description: Storing the constraint UID allows us to detect drift, such as when a constraint has been recreated after its CRD was deleted out from under it, interrupting the watch + type: string + enforced: + type: boolean + errors: + items: + description: Error represents a single error caught while adding a constraint to OPA + properties: + code: + type: string + location: + type: string + message: + type: string + required: + - code + - message + type: object + type: array + id: + type: string + observedGeneration: + format: int64 + type: integer + operations: + items: + type: string + type: array + type: object + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/crd-manifest/constrainttemplate-customresourcedefinition.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/crd-manifest/constrainttemplate-customresourcedefinition.yaml new file mode 100755 index 000000000..41daf22de --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/crd-manifest/constrainttemplate-customresourcedefinition.yaml @@ -0,0 +1,97 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + labels: + gatekeeper.sh/system: "yes" + name: constrainttemplates.templates.gatekeeper.sh +spec: + 
group: templates.gatekeeper.sh + names: + kind: ConstraintTemplate + plural: constrainttemplates + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + properties: + crd: + properties: + spec: + properties: + names: + properties: + kind: + type: string + shortNames: + items: + type: string + type: array + type: object + validation: + type: object + type: object + type: object + targets: + items: + properties: + libs: + items: + type: string + type: array + rego: + type: string + target: + type: string + type: object + type: array + type: object + status: + properties: + byPod: + items: + properties: + errors: + items: + properties: + code: + type: string + location: + type: string + message: + type: string + required: + - code + - message + type: object + type: array + id: + description: a unique identifier for the pod that wrote the status + type: string + observedGeneration: + format: int64 + type: integer + type: object + type: array + created: + type: boolean + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true + - name: v1alpha1 + served: true + storage: false diff --git 
a/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/crd-manifest/constrainttemplatepodstatus-customresourcedefinition.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/crd-manifest/constrainttemplatepodstatus-customresourcedefinition.yaml new file mode 100755 index 000000000..804dca48c --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/crd-manifest/constrainttemplatepodstatus-customresourcedefinition.yaml @@ -0,0 +1,67 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.3.0 + creationTimestamp: null + labels: + gatekeeper.sh/system: "yes" + name: constrainttemplatepodstatuses.status.gatekeeper.sh +spec: + group: status.gatekeeper.sh + names: + kind: ConstraintTemplatePodStatus + listKind: ConstraintTemplatePodStatusList + plural: constrainttemplatepodstatuses + singular: constrainttemplatepodstatus + scope: Namespaced + validation: + openAPIV3Schema: + description: ConstraintTemplatePodStatus is the Schema for the constrainttemplatepodstatuses API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + status: + description: ConstraintTemplatePodStatusStatus defines the observed state of ConstraintTemplatePodStatus + properties: + errors: + items: + description: CreateCRDError represents a single error caught during parsing, compiling, etc. + properties: + code: + type: string + location: + type: string + message: + type: string + required: + - code + - message + type: object + type: array + id: + description: 'Important: Run "make" to regenerate code after modifying this file' + type: string + observedGeneration: + format: int64 + type: integer + operations: + items: + type: string + type: array + templateUID: + description: UID is a type that holds unique ID values, including UUIDs. Because we don't ONLY use UUIDs, this is an alias to string. Being a type captures intent and helps make sure that UIDs and names do not get conflated. + type: string + type: object + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/templates/_helpers.tpl b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/templates/_helpers.tpl new file mode 100755 index 000000000..39b26c195 --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/templates/_helpers.tpl @@ -0,0 +1,7 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/templates/jobs.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/templates/jobs.yaml new file mode 100755 index 000000000..709005fd9 --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/templates/jobs.yaml @@ -0,0 +1,92 @@ 
+apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Chart.Name }}-create + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Chart.Name }} + annotations: + "helm.sh/hook": post-install, post-upgrade, post-rollback + "helm.sh/hook-delete-policy": hook-succeeded +spec: + template: + metadata: + name: {{ .Chart.Name }}-create + labels: + app: {{ .Chart.Name }} + spec: + serviceAccountName: {{ .Chart.Name }}-manager + securityContext: + runAsNonRoot: true + runAsUser: 1000 + containers: + - name: create-crds + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + command: + - /bin/kubectl + - apply + - -f + - /etc/config/crd-manifest.yaml + volumeMounts: + - name: crd-manifest + readOnly: true + mountPath: /etc/config + restartPolicy: OnFailure + volumes: + - name: crd-manifest + configMap: + name: {{ .Chart.Name }}-manifest +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Chart.Name }}-delete + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Chart.Name }} + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-delete-policy": hook-succeeded +spec: + template: + metadata: + name: {{ .Chart.Name }}-delete + labels: + app: {{ .Chart.Name }} + spec: + serviceAccountName: {{ .Chart.Name }}-manager + securityContext: + runAsNonRoot: true + runAsUser: 1000 + initContainers: + - name: remove-finalizers + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + command: + - /bin/kubectl + - apply + - -f + - /etc/config/crd-manifest.yaml + volumeMounts: + - name: crd-manifest + readOnly: true + mountPath: /etc/config + containers: + - name: delete-crds + image: {{ template "system_default_registry" . 
}}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + command: + - /bin/kubectl + - delete + - -f + - /etc/config/crd-manifest.yaml + volumeMounts: + - name: crd-manifest + readOnly: true + mountPath: /etc/config + restartPolicy: OnFailure + volumes: + - name: crd-manifest + configMap: + name: {{ .Chart.Name }}-manifest diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/templates/manifest.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/templates/manifest.yaml new file mode 100755 index 000000000..31016b6ef --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/templates/manifest.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Chart.Name }}-manifest + namespace: {{ .Release.Namespace }} +data: + crd-manifest.yaml: | + {{- $currentScope := . -}} + {{- $crds := (.Files.Glob "crd-manifest/**.yaml") -}} + {{- range $path, $_ := $crds -}} + {{- with $currentScope -}} + {{ .Files.Get $path | nindent 4 }} + --- + {{- end -}}{{- end -}} diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/templates/rbac.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/templates/rbac.yaml new file mode 100755 index 000000000..bdda1ddad --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/templates/rbac.yaml @@ -0,0 +1,72 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Chart.Name }}-manager + labels: + app: {{ .Chart.Name }}-manager +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: ['create', 'get', 'patch', 'delete'] +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ .Chart.Name }}-manager +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Chart.Name }}-manager + labels: + app: {{ .Chart.Name }}-manager +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Chart.Name }}-manager +subjects: +- kind: ServiceAccount + name: {{ .Chart.Name }}-manager + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Chart.Name }}-manager + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Chart.Name }}-manager +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ .Chart.Name }}-manager + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Chart.Name }}-manager +spec: + privileged: false + allowPrivilegeEscalation: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'configMap' + - 'secret' diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/values.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/values.yaml new file mode 100755 index 000000000..657ccacf8 --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper-crd/3.3.001/values.yaml @@ -0,0 +1,11 @@ +# Default values for rancher-gatekeeper-crd. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +global: + cattle: + systemDefaultRegistry: "" + +image: + repository: rancher/kubectl + tag: v1.20.2 diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/.helmignore b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/.helmignore new file mode 100755 index 000000000..f0c131944 --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/CHANGELOG.md b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/CHANGELOG.md new file mode 100755 index 000000000..c68d23c24 --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/CHANGELOG.md @@ -0,0 +1,15 @@ +# Changelog +All notable changes from the upstream OPA Gatekeeper chart will be added to this file + +## [Package Version 00] - 2020-09-10 +### Added +- Enabled the CRD chart generator in `package.yaml` + +### Modified +- Updated namespace to `cattle-gatekeeper-system` +- Updated for Helm 3 compatibility + - Moved crds to `crds` directory + - Removed `crd-install` hooks and templates from crds + +### Removed +- Removed `gatekeeper-system-namespace.yaml` as Rancher handles namespaces for chart installation diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/Chart.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/Chart.yaml new file mode 100755 index 000000000..95989f1f6 --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/Chart.yaml @@ -0,0 +1,22 @@ +annotations: + catalog.cattle.io/auto-install: rancher-gatekeeper-crd=match + catalog.cattle.io/certified: rancher + catalog.cattle.io/display-name: OPA Gatekeeper + catalog.cattle.io/namespace: cattle-gatekeeper-system + catalog.cattle.io/os: linux + catalog.cattle.io/provides-gvr: config.gatekeeper.sh.config/v1alpha1 + catalog.cattle.io/release-name: rancher-gatekeeper + catalog.cattle.io/ui-component: gatekeeper +apiVersion: v1 +appVersion: v3.3.0 +description: Modifies Open Policy Agent's upstream gatekeeper chart that provides + policy-based control for cloud native environments +home: https://github.com/open-policy-agent/gatekeeper +icon: https://charts.rancher.io/assets/logos/gatekeeper.svg 
+keywords: +- open policy agent +- security +name: rancher-gatekeeper +sources: +- https://github.com/open-policy-agent/gatekeeper.git +version: 3.3.001 diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/README.md b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/README.md new file mode 100755 index 000000000..45cf27c79 --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/README.md @@ -0,0 +1,39 @@ +# Gatekeeper Helm Chart + +## Parameters + +| Parameter | Description | Default | +| :---------------------------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------ | +| auditInterval | The frequency with which audit is run | `300` | +| constraintViolationsLimit | The maximum # of audit violations reported on a constraint | `20` | +| auditFromCache | Take the roster of resources to audit from the OPA cache | `false` | +| auditChunkSize | Chunk size for listing cluster resources for audit (alpha feature) | `0` | +| disableValidatingWebhook | Disable the validating webhook | `false` | +| validatingWebhookTimeoutSeconds | The timeout for the validating webhook in seconds | `3` | +| enableDeleteOperations | Enable validating webhook for delete operations | `false` | +| emitAdmissionEvents | Emit K8s events in gatekeeper namespace for admission violations (alpha feature) | `false` | +| emitAuditEvents | Emit K8s events in gatekeeper namespace for audit violations (alpha feature) | `false` | +| logLevel | Minimum log level | `INFO` | +| image.pullPolicy | The image pull policy | `IfNotPresent` | +| image.repository | Image repository | `openpolicyagent/gatekeeper` | +| image.release | The image release tag to use | Current release version: `v3.3.0` | +| image.pullSecrets | Specify an array of 
imagePullSecrets | `[]` | +| resources | The resource request/limits for the container image | limits: 1 CPU, 512Mi, requests: 100mCPU, 256Mi | +| nodeSelector | The node selector to use for pod scheduling | `kubernetes.io/os: linux` | +| affinity | The node affinity to use for pod scheduling | `{}` | +| tolerations | The tolerations to use for pod scheduling | `[]` | +| controllerManager.priorityClassName | Priority class name for controller manager | `system-cluster-critical` | +| audit.priorityClassName | Priority class name for audit controller | `system-cluster-critical` | +| replicas | The number of Gatekeeper replicas to deploy for the webhook | `1` | +| podAnnotations | The annotations to add to the Gatekeeper pods | `container.seccomp.security.alpha.kubernetes.io/manager: runtime/default` | +| secretAnnotations | The annotations to add to the Gatekeeper secrets | `{}` | +| customResourceDefinitions.create | Whether the release should install CRDs. Regardless of this value, Helm v3+ will install the CRDs if those are not present already. Use --skip-crds with helm install if you want to skip CRD creation | `true` | + +## Contributing Changes + +This Helm chart is autogenerated from the Gatekeeper static manifest. The +generator code lives under `cmd/build/helmify`. To make modifications to this +template, please edit `kustomization.yaml` and `replacements.go` under that +directory and then run `make manifests`. Your changes will show up in the +`manifest_staging` directory and will be promoted to the root `charts` directory +the next time a Gatekeeper release is cut. 
diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/app-readme.md b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/app-readme.md new file mode 100755 index 000000000..d44cf7b2b --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/app-readme.md @@ -0,0 +1,14 @@ +# Rancher OPA Gatekeeper + +This chart is based off of the upstream [OPA Gatekeeper](https://github.com/open-policy-agent/gatekeeper/tree/master/charts/gatekeeper) chart. + +For more information on how to use the feature, refer to our [docs](https://rancher.com/docs/rancher/v2.x/en/opa-gatekeper/). + +The chart installs the following components: + +- OPA Gatekeeper Controller-Manager - OPA Gatekeeper is a policy engine for providing policy based governance for Kubernetes clusters. The controller installs as a [validating admission controller webhook](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#validatingadmissionwebhook) on the cluster and intercepts all admission requests that create, update or delete a resource in the cluster. +- [Audit](https://github.com/open-policy-agent/gatekeeper#audit) - A periodic audit of the cluster resources against the enforced policies. Any existing resource that violates a policy will be recorded as violations. +- [Constraint Template](https://github.com/open-policy-agent/gatekeeper#constraint-templates) - A template is a CRD (`ConstraintTemplate`) that defines the schema and Rego logic of a policy to be applied to the cluster by Gatekeeper's admission controller webhook. This chart installs a few default `ConstraintTemplate` custom resources. +- [Constraint](https://github.com/open-policy-agent/gatekeeper#constraints) - A constraint is a custom resource that defines the scope of resources which a specific constraint template should apply to. The complete policy is defined by a combination of `ConstraintTemplates` (i.e. what the policy is) and `Constraints` (i.e. what resource to apply the policy to). 
+ +For more information on how to configure the Helm chart, refer to the Helm README. diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/_helpers.tpl b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/_helpers.tpl new file mode 100755 index 000000000..f5d0ab307 --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/_helpers.tpl @@ -0,0 +1,52 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "gatekeeper.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "gatekeeper.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "gatekeeper.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "gatekeeper.labels" -}} +app.kubernetes.io/name: {{ include "gatekeeper.name" . }} +helm.sh/chart: {{ include "gatekeeper.chart" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/allowedrepos.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/allowedrepos.yaml new file mode 100755 index 000000000..9abb84ecb --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/allowedrepos.yaml @@ -0,0 +1,35 @@ +apiVersion: templates.gatekeeper.sh/v1beta1 +kind: ConstraintTemplate +metadata: + name: k8sallowedrepos +spec: + crd: + spec: + names: + kind: K8sAllowedRepos + validation: + # Schema for the `parameters` field + openAPIV3Schema: + properties: + repos: + type: array + items: + type: string + targets: + - target: admission.k8s.gatekeeper.sh + rego: | + package k8sallowedrepos + + violation[{"msg": msg}] { + container := input.review.object.spec.containers[_] + satisfied := [good | repo = input.parameters.repos[_] ; good = startswith(container.image, repo)] + not any(satisfied) + msg := sprintf("container <%v> has an invalid image repo <%v>, allowed repos are %v", [container.name, container.image, input.parameters.repos]) + } + + violation[{"msg": msg}] { + container := input.review.object.spec.initContainers[_] + satisfied := [good | repo = input.parameters.repos[_] ; good = startswith(container.image, repo)] + not any(satisfied) + msg := sprintf("container <%v> has an invalid image repo <%v>, allowed repos are %v", [container.name, container.image, input.parameters.repos]) + } diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-admin-podsecuritypolicy.yaml 
b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-admin-podsecuritypolicy.yaml new file mode 100755 index 000000000..78f36ecfb --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-admin-podsecuritypolicy.yaml @@ -0,0 +1,35 @@ +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + labels: + app: '{{ template "gatekeeper.name" . }}' + chart: '{{ template "gatekeeper.name" . }}' + gatekeeper.sh/system: "yes" + heritage: '{{ .Release.Service }}' + release: '{{ .Release.Name }}' + name: gatekeeper-admin +spec: + allowPrivilegeEscalation: false + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - projected + - secret + - downwardAPI diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-admin-serviceaccount.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-admin-serviceaccount.yaml new file mode 100755 index 000000000..4b68998cb --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-admin-serviceaccount.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: '{{ template "gatekeeper.name" . }}' + chart: '{{ template "gatekeeper.name" . 
}}' + gatekeeper.sh/system: "yes" + heritage: '{{ .Release.Service }}' + release: '{{ .Release.Name }}' + name: gatekeeper-admin + namespace: '{{ .Release.Namespace }}' diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-audit-deployment.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-audit-deployment.yaml new file mode 100755 index 000000000..95ccaa767 --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-audit-deployment.yaml @@ -0,0 +1,103 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: '{{ template "gatekeeper.name" . }}' + chart: '{{ template "gatekeeper.name" . }}' + control-plane: audit-controller + gatekeeper.sh/operation: audit + gatekeeper.sh/system: "yes" + heritage: '{{ .Release.Service }}' + release: '{{ .Release.Name }}' + name: gatekeeper-audit + namespace: '{{ .Release.Namespace }}' +spec: + replicas: 1 + selector: + matchLabels: + app: '{{ template "gatekeeper.name" . }}' + chart: '{{ template "gatekeeper.name" . }}' + control-plane: audit-controller + gatekeeper.sh/operation: audit + gatekeeper.sh/system: "yes" + heritage: '{{ .Release.Service }}' + release: '{{ .Release.Name }}' + template: + metadata: + annotations: +{{- toYaml .Values.podAnnotations | trim | nindent 8 }} + labels: + app: '{{ template "gatekeeper.name" . }}' + chart: '{{ template "gatekeeper.name" . 
}}' + control-plane: audit-controller + gatekeeper.sh/operation: audit + gatekeeper.sh/system: "yes" + heritage: '{{ .Release.Service }}' + release: '{{ .Release.Name }}' + spec: + automountServiceAccountToken: true + containers: + - args: + - --audit-interval={{ .Values.auditInterval }} + - --log-level={{ .Values.logLevel }} + - --constraint-violations-limit={{ .Values.constraintViolationsLimit }} + - --audit-from-cache={{ .Values.auditFromCache }} + - --audit-chunk-size={{ .Values.auditChunkSize }} + - --emit-audit-events={{ .Values.emitAuditEvents }} + - --operation=audit + - --operation=status + - --logtostderr + command: + - /manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: '{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}' + imagePullPolicy: '{{ .Values.image.pullPolicy }}' + livenessProbe: + httpGet: + path: /healthz + port: 9090 + name: manager + ports: + - containerPort: 8888 + name: metrics + protocol: TCP + - containerPort: 9090 + name: healthz + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9090 + resources: +{{ toYaml .Values.audit.resources | indent 10 }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - all + readOnlyRootFilesystem: true + runAsGroup: 999 + runAsNonRoot: true + runAsUser: 1000 + nodeSelector: +{{ toYaml .Values.audit.nodeSelector | indent 8 }} + affinity: +{{ toYaml .Values.audit.affinity | indent 8 }} + tolerations: +{{ toYaml .Values.audit.tolerations | indent 8 }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} +{{- if .Values.audit.priorityClassName }} + priorityClassName: {{ .Values.audit.priorityClassName }} +{{- end }} + serviceAccountName: gatekeeper-admin + terminationGracePeriodSeconds: 60 diff --git 
a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-controller-manager-deployment.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-controller-manager-deployment.yaml new file mode 100755 index 000000000..b050f3574 --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-controller-manager-deployment.yaml @@ -0,0 +1,124 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: '{{ template "gatekeeper.name" . }}' + chart: '{{ template "gatekeeper.name" . }}' + control-plane: controller-manager + gatekeeper.sh/operation: webhook + gatekeeper.sh/system: "yes" + heritage: '{{ .Release.Service }}' + release: '{{ .Release.Name }}' + name: gatekeeper-controller-manager + namespace: '{{ .Release.Namespace }}' +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: '{{ template "gatekeeper.name" . }}' + chart: '{{ template "gatekeeper.name" . }}' + control-plane: controller-manager + gatekeeper.sh/operation: webhook + gatekeeper.sh/system: "yes" + heritage: '{{ .Release.Service }}' + release: '{{ .Release.Name }}' + template: + metadata: + annotations: +{{- toYaml .Values.podAnnotations | trim | nindent 8 }} + labels: + app: '{{ template "gatekeeper.name" . }}' + chart: '{{ template "gatekeeper.name" . 
}}' + control-plane: controller-manager + gatekeeper.sh/operation: webhook + gatekeeper.sh/system: "yes" + heritage: '{{ .Release.Service }}' + release: '{{ .Release.Name }}' + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: gatekeeper.sh/operation + operator: In + values: + - webhook + topologyKey: kubernetes.io/hostname + weight: 100 + automountServiceAccountToken: true + containers: + - args: + - --port=8443 + - --logtostderr + - --emit-admission-events={{ .Values.emitAdmissionEvents }} + - --log-level={{ .Values.logLevel }} + - --exempt-namespace=gatekeeper-system + - --operation=webhook + command: + - /manager + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: '{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}' + imagePullPolicy: '{{ .Values.image.pullPolicy }}' + livenessProbe: + httpGet: + path: /healthz + port: 9090 + name: manager + ports: + - containerPort: 8443 + name: webhook-server + protocol: TCP + - containerPort: 8888 + name: metrics + protocol: TCP + - containerPort: 9090 + name: healthz + protocol: TCP + readinessProbe: + httpGet: + path: /readyz + port: 9090 + resources: +{{ toYaml .Values.controllerManager.resources | indent 10 }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - all + readOnlyRootFilesystem: true + runAsGroup: 999 + runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - mountPath: /certs + name: cert + readOnly: true + nodeSelector: +{{ toYaml .Values.controllerManager.nodeSelector | indent 8 }} + affinity: +{{ toYaml .Values.controllerManager.affinity | indent 8 }} + tolerations: +{{ toYaml .Values.controllerManager.tolerations | indent 8 }} + imagePullSecrets: +{{ toYaml .Values.image.pullSecrets | indent 8 }} 
+{{- if .Values.controllerManager.priorityClassName }} + priorityClassName: {{ .Values.controllerManager.priorityClassName }} +{{- end }} + serviceAccountName: gatekeeper-admin + terminationGracePeriodSeconds: 60 + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: gatekeeper-webhook-server-cert diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-manager-role-clusterrole.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-manager-role-clusterrole.yaml new file mode 100755 index 000000000..05577fb22 --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-manager-role-clusterrole.yaml @@ -0,0 +1,139 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + labels: + app: '{{ template "gatekeeper.name" . }}' + chart: '{{ template "gatekeeper.name" . }}' + gatekeeper.sh/system: "yes" + heritage: '{{ .Release.Service }}' + release: '{{ .Release.Name }}' + name: gatekeeper-manager-role +rules: +- apiGroups: + - '*' + resources: + - '*' + verbs: + - get + - list + - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - config.gatekeeper.sh + resources: + - configs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - config.gatekeeper.sh + resources: + - configs/status + verbs: + - get + - patch + - update +- apiGroups: + - constraints.gatekeeper.sh + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - mutations.gatekeeper.sh + resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - policy + resourceNames: + - gatekeeper-admin + resources: + - podsecuritypolicies + verbs: + - use +- apiGroups: + - status.gatekeeper.sh + 
resources: + - '*' + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - templates.gatekeeper.sh + resources: + - constrainttemplates + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - templates.gatekeeper.sh + resources: + - constrainttemplates/finalizers + verbs: + - delete + - get + - patch + - update +- apiGroups: + - templates.gatekeeper.sh + resources: + - constrainttemplates/status + verbs: + - get + - patch + - update +- apiGroups: + - admissionregistration.k8s.io + resourceNames: + - gatekeeper-validating-webhook-configuration + resources: + - validatingwebhookconfigurations + verbs: + - create + - delete + - get + - list + - patch + - update + - watch diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-manager-role-role.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-manager-role-role.yaml new file mode 100755 index 000000000..73e2c5cf7 --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-manager-role-role.yaml @@ -0,0 +1,32 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + creationTimestamp: null + labels: + app: '{{ template "gatekeeper.name" . }}' + chart: '{{ template "gatekeeper.name" . 
}}' + gatekeeper.sh/system: "yes" + heritage: '{{ .Release.Service }}' + release: '{{ .Release.Name }}' + name: gatekeeper-manager-role + namespace: '{{ .Release.Namespace }}' +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-manager-rolebinding-clusterrolebinding.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-manager-rolebinding-clusterrolebinding.yaml new file mode 100755 index 000000000..22194d2ad --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-manager-rolebinding-clusterrolebinding.yaml @@ -0,0 +1,18 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: '{{ template "gatekeeper.name" . }}' + chart: '{{ template "gatekeeper.name" . }}' + gatekeeper.sh/system: "yes" + heritage: '{{ .Release.Service }}' + release: '{{ .Release.Name }}' + name: gatekeeper-manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: gatekeeper-manager-role +subjects: +- kind: ServiceAccount + name: gatekeeper-admin + namespace: '{{ .Release.Namespace }}' diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-manager-rolebinding-rolebinding.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-manager-rolebinding-rolebinding.yaml new file mode 100755 index 000000000..4bf6087dc --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-manager-rolebinding-rolebinding.yaml @@ -0,0 +1,19 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: '{{ template "gatekeeper.name" . }}' + chart: '{{ template "gatekeeper.name" . 
}}' + gatekeeper.sh/system: "yes" + heritage: '{{ .Release.Service }}' + release: '{{ .Release.Name }}' + name: gatekeeper-manager-rolebinding + namespace: '{{ .Release.Namespace }}' +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: gatekeeper-manager-role +subjects: +- kind: ServiceAccount + name: gatekeeper-admin + namespace: '{{ .Release.Namespace }}' diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-validating-webhook-configuration-validatingwebhookconfiguration.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-validating-webhook-configuration-validatingwebhookconfiguration.yaml new file mode 100755 index 000000000..ba72d918e --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-validating-webhook-configuration-validatingwebhookconfiguration.yaml @@ -0,0 +1,61 @@ +{{- if not .Values.disableValidatingWebhook }} +apiVersion: admissionregistration.k8s.io/v1beta1 +kind: ValidatingWebhookConfiguration +metadata: + creationTimestamp: null + labels: + app: '{{ template "gatekeeper.name" . }}' + chart: '{{ template "gatekeeper.name" . 
}}' + gatekeeper.sh/system: "yes" + heritage: '{{ .Release.Service }}' + release: '{{ .Release.Name }}' + name: gatekeeper-validating-webhook-configuration +webhooks: +- clientConfig: + caBundle: Cg== + service: + name: gatekeeper-webhook-service + namespace: '{{ .Release.Namespace }}' + path: /v1/admit + failurePolicy: Ignore + name: validation.gatekeeper.sh + namespaceSelector: + matchExpressions: + - key: admission.gatekeeper.sh/ignore + operator: DoesNotExist + rules: + - apiGroups: + - '*' + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + {{- if .Values.enableDeleteOperations }} + - DELETE + {{- end}} + resources: + - '*' + sideEffects: None + timeoutSeconds: {{ .Values.validatingWebhookTimeoutSeconds }} +- clientConfig: + caBundle: Cg== + service: + name: gatekeeper-webhook-service + namespace: '{{ .Release.Namespace }}' + path: /v1/admitlabel + failurePolicy: Fail + name: check-ignore-label.gatekeeper.sh + rules: + - apiGroups: + - "" + apiVersions: + - '*' + operations: + - CREATE + - UPDATE + resources: + - namespaces + sideEffects: None + timeoutSeconds: {{ .Values.validatingWebhookTimeoutSeconds }} +{{- end }} diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-webhook-server-cert-secret.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-webhook-server-cert-secret.yaml new file mode 100755 index 000000000..5438a377d --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-webhook-server-cert-secret.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: Secret +metadata: + annotations: +{{- toYaml .Values.secretAnnotations | trim | nindent 4 }} + labels: + app: '{{ template "gatekeeper.name" . }}' + chart: '{{ template "gatekeeper.name" . 
}}' + gatekeeper.sh/system: "yes" + heritage: '{{ .Release.Service }}' + release: '{{ .Release.Name }}' + name: gatekeeper-webhook-server-cert + namespace: '{{ .Release.Namespace }}' diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-webhook-service-service.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-webhook-service-service.yaml new file mode 100755 index 000000000..473bc4b25 --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/gatekeeper-webhook-service-service.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: '{{ template "gatekeeper.name" . }}' + chart: '{{ template "gatekeeper.name" . }}' + gatekeeper.sh/system: "yes" + heritage: '{{ .Release.Service }}' + release: '{{ .Release.Name }}' + name: gatekeeper-webhook-service + namespace: '{{ .Release.Namespace }}' +spec: + ports: + - port: 443 + targetPort: 8443 + selector: + app: '{{ template "gatekeeper.name" . }}' + chart: '{{ template "gatekeeper.name" . 
}}' + control-plane: controller-manager + gatekeeper.sh/operation: webhook + gatekeeper.sh/system: "yes" + heritage: '{{ .Release.Service }}' + release: '{{ .Release.Name }}' diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/requiredlabels.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/requiredlabels.yaml new file mode 100755 index 000000000..e93e6a0a7 --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/requiredlabels.yaml @@ -0,0 +1,57 @@ +apiVersion: templates.gatekeeper.sh/v1beta1 +kind: ConstraintTemplate +metadata: + name: k8srequiredlabels +spec: + crd: + spec: + names: + kind: K8sRequiredLabels + validation: + # Schema for the `parameters` field + openAPIV3Schema: + properties: + message: + type: string + labels: + type: array + items: + type: object + properties: + key: + type: string + allowedRegex: + type: string + targets: + - target: admission.k8s.gatekeeper.sh + rego: | + package k8srequiredlabels + + get_message(parameters, _default) = msg { + not parameters.message + msg := _default + } + + get_message(parameters, _default) = msg { + msg := parameters.message + } + + violation[{"msg": msg, "details": {"missing_labels": missing}}] { + provided := {label | input.review.object.metadata.labels[label]} + required := {label | label := input.parameters.labels[_].key} + missing := required - provided + count(missing) > 0 + def_msg := sprintf("you must provide labels: %v", [missing]) + msg := get_message(input.parameters, def_msg) + } + + violation[{"msg": msg}] { + value := input.review.object.metadata.labels[key] + expected := input.parameters.labels[_] + expected.key == key + # do not match if allowedRegex is not defined, or is an empty string + expected.allowedRegex != "" + not re_match(expected.allowedRegex, value) + def_msg := sprintf("Label <%v: %v> does not satisfy allowed regex: %v", [key, value, expected.allowedRegex]) + msg := get_message(input.parameters, def_msg) + } 
diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/validate-install-crd.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/validate-install-crd.yaml new file mode 100755 index 000000000..875d7af02 --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/templates/validate-install-crd.yaml @@ -0,0 +1,17 @@ +#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}} +# {{- $found := dict -}} +# {{- set $found "config.gatekeeper.sh/v1alpha1/Config" false -}} +# {{- set $found "status.gatekeeper.sh/v1beta1/ConstraintPodStatus" false -}} +# {{- set $found "templates.gatekeeper.sh/v1beta1/ConstraintTemplate" false -}} +# {{- set $found "status.gatekeeper.sh/v1beta1/ConstraintTemplatePodStatus" false -}} +# {{- range .Capabilities.APIVersions -}} +# {{- if hasKey $found (toString .) -}} +# {{- set $found (toString .) true -}} +# {{- end -}} +# {{- end -}} +# {{- range $_, $exists := $found -}} +# {{- if (eq $exists false) -}} +# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." 
"" -}} +# {{- end -}} +# {{- end -}} +#{{- end -}} \ No newline at end of file diff --git a/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/values.yaml b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/values.yaml new file mode 100755 index 000000000..899119fc6 --- /dev/null +++ b/charts/rancher-gatekeeper/rancher-gatekeeper/3.3.001/values.yaml @@ -0,0 +1,60 @@ +replicas: 3 +auditInterval: 300 +constraintViolationsLimit: 20 +auditFromCache: false +disableValidatingWebhook: false +validatingWebhookTimeoutSeconds: 3 +enableDeleteOperations: false +auditChunkSize: 0 +logLevel: INFO +emitAdmissionEvents: false +emitAuditEvents: false +image: + repository: rancher/mirrored-openpolicyagent-gatekeeper + tag: v3.3.0 + pullPolicy: IfNotPresent + pullSecrets: [] +podAnnotations: + { container.seccomp.security.alpha.kubernetes.io/manager: runtime/default } +secretAnnotations: {} +controllerManager: + priorityClassName: system-cluster-critical + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: gatekeeper.sh/operation + operator: In + values: + - webhook + topologyKey: kubernetes.io/hostname + weight: 100 + tolerations: [] + nodeSelector: { kubernetes.io/os: linux } + resources: + limits: + cpu: 1000m + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi +audit: + priorityClassName: system-cluster-critical + affinity: {} + tolerations: [] + nodeSelector: { kubernetes.io/os: linux } + resources: + limits: + cpu: 1000m + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi +global: + cattle: + systemDefaultRegistry: "" + kubectl: + repository: rancher/kubectl + tag: v1.20.2 diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/.helmignore b/charts/rancher-grafana/rancher-grafana/6.6.401/.helmignore new file mode 100755 index 000000000..8cade1318 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore 
when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.vscode +.project +.idea/ +*.tmproj +OWNERS diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/Chart.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/Chart.yaml new file mode 100755 index 000000000..83ff41050 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/Chart.yaml @@ -0,0 +1,28 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-grafana +apiVersion: v2 +appVersion: 7.4.5 +description: The leading tool for querying and visualizing time series and metrics. +home: https://grafana.net +icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png +kubeVersion: ^1.8.0-0 +maintainers: +- email: zanhsieh@gmail.com + name: zanhsieh +- email: rluckie@cisco.com + name: rtluckie +- email: maor.friedman@redhat.com + name: maorfr +- email: miroslav.hadzhiev@gmail.com + name: Xtigyro +- email: mail@torstenwalter.de + name: torstenwalter +name: rancher-grafana +sources: +- https://github.com/grafana/grafana +type: application +version: 6.6.401 diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/README.md b/charts/rancher-grafana/rancher-grafana/6.6.401/README.md new file mode 100755 index 000000000..957f019ec --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/README.md @@ -0,0 +1,514 @@ +# Grafana Helm Chart + +* Installs the web dashboarding system [Grafana](http://grafana.org/) + +## Get Repo Info + +```console +helm repo add grafana https://grafana.github.io/helm-charts +helm repo update +``` + +_See [helm 
repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release grafana/grafana +``` + +## Uninstalling the Chart + +To uninstall/delete the my-release deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 4.0.0 (And 3.12.1) + +This version requires Helm >= 2.12.0. + +### To 5.0.0 + +You have to add --force to your helm upgrade command as the labels of the chart have changed. + +### To 6.0.0 + +This version requires Helm >= 3.1.0. + +## Configuration + +| Parameter | Description | Default | +|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------| +| `replicas` | Number of nodes | `1` | +| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` | +| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` | +| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` | +| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` | +| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`| +| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "runAsGroup": 472, "fsGroup": 472}` | +| `priorityClassName` | Name of Priority Class to assign pods | `nil` | +| `image.repository` | Image repository | `grafana/grafana` | +| `image.tag` | Image tag (`Must be >= 5.0.0`) | `7.4.5` | +| `image.sha` | Image 
sha (optional) | `2b56f6106ddc376bb46d974230d530754bf65a640dfbc5245191d72d3b49efc6` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Image pull secrets | `{}` | +| `service.type` | Kubernetes service type | `ClusterIP` | +| `service.port` | Kubernetes port where service is exposed | `80` | +| `service.portName` | Name of the port on the service | `service` | +| `service.targetPort` | Internal service is port | `3000` | +| `service.nodePort` | Kubernetes service nodePort | `nil` | +| `service.annotations` | Service annotations | `{}` | +| `service.labels` | Custom labels | `{}` | +| `service.clusterIP` | internal cluster service IP | `nil` | +| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `nil` | +| `service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to lb (if supported) | `[]` | +| `service.externalIPs` | service external IP addresses | `[]` | +| `extraExposePorts` | Additional service ports for sidecar containers| `[]` | +| `hostAliases` | adds rules to the pod's /etc/hosts | `[]` | +| `ingress.enabled` | Enables Ingress | `false` | +| `ingress.annotations` | Ingress annotations (values are templated) | `{}` | +| `ingress.labels` | Custom labels | `{}` | +| `ingress.path` | Ingress accepted path | `/` | +| `ingress.pathType` | Ingress type of path | `Prefix` | +| `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` | +| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions). 
| `[]` | +| `ingress.tls` | Ingress TLS configuration | `[]` | +| `resources` | CPU/Memory resource requests/limits | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Toleration labels for pod assignment | `[]` | +| `affinity` | Affinity settings for pod assignment | `{}` | +| `extraInitContainers` | Init containers to add to the grafana pod | `{}` | +| `extraContainers` | Sidecar containers to add to the grafana pod | `{}` | +| `extraContainerVolumes` | Volumes that can be mounted in sidecar containers | `[]` | +| `extraLabels` | Custom labels for all manifests | `{}` | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `persistence.enabled` | Use persistent volume to store data | `false` | +| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `pvc` | +| `persistence.size` | Size of persistent volume claim | `10Gi` | +| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` | +| `persistence.storageClassName` | Type of persistent volume claim | `nil` | +| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` | +| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` | +| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` | +| `persistence.subPath` | Mount a sub dir of the persistent volume | `nil` | +| `persistence.inMemory.enabled` | If persistence is not enabled, whether to mount the local storage in-memory to improve performance | `false` | +| `persistence.inMemory.sizeLimit` | SizeLimit for the in-memory local storage | `nil` | +| `initChownData.enabled` | If false, don't reset data ownership at startup | true | +| `initChownData.image.repository` | init-chown-data container image repository | `busybox` | +| `initChownData.image.tag` | init-chown-data container image tag | `1.31.1` | +| `initChownData.image.sha` | init-chown-data container image sha (optional)| `""` | +| 
`initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` | +| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` | +| `schedulerName` | Alternate scheduler name | `nil` | +| `env` | Extra environment variables passed to pods | `{}` | +| `envValueFrom` | Environment variables from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. | `{}` | +| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` | +| `envRenderSecret` | Sensible environment variables passed to pods and stored as secret | `{}` | +| `extraSecretMounts` | Additional grafana server secret mounts | `[]` | +| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` | +| `extraConfigmapMounts` | Additional grafana server configMap volume mounts | `[]` | +| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` | +| `plugins` | Plugins to be loaded along with Grafana | `[]` | +| `datasources` | Configure grafana datasources (passed through tpl) | `{}` | +| `notifiers` | Configure grafana notifiers | `{}` | +| `dashboardProviders` | Configure grafana dashboard providers | `{}` | +| `dashboards` | Dashboards to import | `{}` | +| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` | +| `grafana.ini` | Grafana's primary configuration | `{}` | +| `ldap.enabled` | Enable LDAP authentication | `false` | +| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. 
| `""` | +| `ldap.config` | Grafana's LDAP configuration | `""` | +| `annotations` | Deployment annotations | `{}` | +| `labels` | Deployment labels | `{}` | +| `podAnnotations` | Pod annotations | `{}` | +| `podLabels` | Pod labels | `{}` | +| `podPortName` | Name of the grafana port on the pod | `grafana` | +| `sidecar.image.repository` | Sidecar image repository | `quay.io/kiwigrid/k8s-sidecar` | +| `sidecar.image.tag` | Sidecar image tag | `1.10.7` | +| `sidecar.image.sha` | Sidecar image sha (optional) | `""` | +| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` | +| `sidecar.resources` | Sidecar resources | `{}` | +| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable | `false` | +| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` | +| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` | +| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` | +| `sidecar.dashboards.provider.orgid` | Id of the organisation, to which the dashboards should be added | `1` | +| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` | +| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` | +| `sidecar.dashboards.provider.allowUiUpdates` | Allow updating provisioned dashboards from the UI | `false` | +| `sidecar.dashboards.provider.type` | Provider type | `file` | +| `sidecar.dashboards.provider.foldersFromFilesStructure` | Allow Grafana to replicate dashboard structure from filesystem. | `false` | +| `sidecar.dashboards.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. 
| `WATCH` | +| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` | +| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` | +| `sidecar.dashboards.labelValue` | Label value that config maps with dashboards should have to be added | `nil` | +| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` | +| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` | +| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` | +| `sidecar.dashboards.searchNamespace` | If specified, the sidecar will search for dashboard config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` | +| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` | +| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` | +| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `nil` | +| `sidecar.datasources.searchNamespace` | If specified, the sidecar will search for datasources config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. 
It's also possible to specify ALL to search in all namespaces | `nil` | +| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` | +| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` | +| `sidecar.notifiers.searchNamespace` | If specified, the sidecar will search for notifiers config-maps (or secrets) inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` | +| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` | +| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` | +| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` | +| `admin.existingSecret` | The name of an existing secret containing the admin credentials. | `""` | +| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` | +| `admin.passwordKey` | The key in the existing admin secret containing the password. 
| `"admin-password"` | +| `serviceAccount.annotations` | ServiceAccount annotations | | +| `serviceAccount.create` | Create service account | `true` | +| `serviceAccount.name` | Service account name to use, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` | +| `serviceAccount.nameTest` | Service account name to use for test, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `nil` | +| `rbac.create` | Create and use RBAC resources | `true` | +| `rbac.namespaced` | Creates Role and Rolebinding instead of the default ClusterRole and ClusteRoleBindings for the grafana instance | `false` | +| `rbac.useExistingRole` | Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. | `nil` | +| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `true` | +| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `true` | +| `rbac.extraRoleRules` | Additional rules to add to the Role | [] | +| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] | +| `command` | Define command to be executed by grafana container at startup | `nil` | +| `testFramework.enabled` | Whether to create test-related resources | `true` | +| `testFramework.image` | `test-framework` image repository. | `bats/bats` | +| `testFramework.tag` | `test-framework` image tag. | `v1.1.0` | +| `testFramework.imagePullPolicy` | `test-framework` image pull policy. 
| `IfNotPresent` | +| `testFramework.securityContext` | `test-framework` securityContext | `{}` | +| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` | +| `downloadDashboards.envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` | +| `downloadDashboards.resources` | Resources of `download-dashboards` container | `{}` | +| `downloadDashboardsImage.repository` | Curl docker image repo | `curlimages/curl` | +| `downloadDashboardsImage.tag` | Curl docker image tag | `7.73.0` | +| `downloadDashboardsImage.sha` | Curl docker image sha (optional) | `""` | +| `downloadDashboardsImage.pullPolicy` | Curl docker image pull policy | `IfNotPresent` | +| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) | +| `serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` | +| `serviceMonitor.namespace` | Namespace this servicemonitor is installed in | | +| `serviceMonitor.interval` | How frequently Prometheus should scrape | `1m` | +| `serviceMonitor.path` | Path to scrape | `/metrics` | +| `serviceMonitor.scheme` | Scheme to use for metrics scraping | `http` | +| `serviceMonitor.tlsConfig` | TLS configuration block for the endpoint | `{}` | +| `serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` | +| `serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `30s` | +| `serviceMonitor.relabelings` | MetricRelabelConfigs to apply to samples before ingestion. 
| `[]` | +| `revisionHistoryLimit` | Number of old ReplicaSets to retain | `10` | +| `imageRenderer.enabled` | Enable the image-renderer deployment & service | `false` | +| `imageRenderer.image.repository` | image-renderer Image repository | `grafana/grafana-image-renderer` | +| `imageRenderer.image.tag` | image-renderer Image tag | `latest` | +| `imageRenderer.image.sha` | image-renderer Image sha (optional) | `""` | +| `imageRenderer.image.pullPolicy` | image-renderer ImagePullPolicy | `Always` | +| `imageRenderer.env` | extra env-vars for image-renderer | `{}` | +| `imageRenderer.serviceAccountName` | image-renderer deployment serviceAccountName | `""` | +| `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` | +| `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` | +| `imageRenderer.priorityClassName` | image-renderer deployment priority class | `''` | +| `imageRenderer.service.portName` | image-renderer service port name | `'http'` | +| `imageRenderer.service.port` | image-renderer service port used by both service and deployment | `8081` | +| `imageRenderer.grafanaSubPath` | Grafana sub path to use for image renderer callback url | `''` | +| `imageRenderer.podPortName` | name of the image-renderer port on the pod | `http` | +| `imageRenderer.revisionHistoryLimit` | number of image-renderer replica sets to keep | `10` | +| `imageRenderer.networkPolicy.limitIngress` | Enable a NetworkPolicy to limit inbound traffic from only the created grafana pods | `true` | +| `imageRenderer.networkPolicy.limitEgress` | Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods | `false` | +| `imageRenderer.resources` | Set resource limits for image-renderer pdos | `{}` | + +### Example ingress with path + +With grafana 6.3 and above +```yaml +grafana.ini: + server: + domain: monitoring.example.com + root_url: "%(protocol)s://%(domain)s/grafana" + serve_from_sub_path: true +ingress: + enabled: true 
+ hosts: + - "monitoring.example.com" + path: "/grafana" +``` + +### Example of extraVolumeMounts + +```yaml +- extraVolumeMounts: + - name: plugins + mountPath: /var/lib/grafana/plugins + subPath: configs/grafana/plugins + existingClaim: existing-grafana-claim + readOnly: false +``` + +## Import dashboards + +There are a few methods to import dashboards to Grafana. Below are some examples and explanations as to how to use each method: + +```yaml +dashboards: + default: + some-dashboard: + json: | + { + "annotations": + + ... + # Complete json file here + ... + + "title": "Some Dashboard", + "uid": "abcd1234", + "version": 1 + } + custom-dashboard: + # This is a path to a file inside the dashboards directory inside the chart directory + file: dashboards/custom-dashboard.json + prometheus-stats: + # Ref: https://grafana.com/dashboards/2 + gnetId: 2 + revision: 2 + datasource: Prometheus + local-dashboard: + url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json +``` + +## BASE64 dashboards + +Dashboards could be stored on a server that does not return JSON directly and instead of it returns a Base64 encoded file (e.g. Gerrit) +A new parameter has been added to the url use case so if you specify a b64content value equals to true after the url entry a Base64 decoding is applied before save the file to disk. +If this entry is not set or is equals to false not decoding is applied to the file before saving it to disk. + +### Gerrit use case + +Gerrit API for download files has the following schema: where {project-name} and +{file-id} usually has '/' in their values and so they MUST be replaced by %2F so if project-name is user/repo, branch-id is master and file-id is equals to dir1/dir2/dashboard +the url value is + +## Sidecar for dashboards + +If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana +pod. 
This container watches all configmaps (or secrets) in the cluster and filters out the ones with +a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written +to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported +dashboards are deleted/updated. + +A recommendation is to use one configmap per dashboard, as a reduction of multiple dashboards inside +one configmap is currently not properly mirrored in grafana. + +Example dashboard config: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: sample-grafana-dashboard + labels: + grafana_dashboard: "1" +data: + k8s-dashboard.json: |- + [...] +``` + +## Sidecar for datasources + +If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana +pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and +filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in +those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, +the data sources in grafana can be imported. + +Secrets are recommended over configmaps for this usecase because datasources usually contain private +data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those. + +Example values to add a datasource adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file): + +```yaml +datasources: + datasources.yaml: + apiVersion: 1 + datasources: + # name of the datasource. Required + - name: Graphite + # datasource type. Required + type: graphite + # access mode. proxy or direct (Server or Browser in the UI). Required + access: proxy + # org id. 
will default to orgId 1 if not specified + orgId: 1 + # url + url: http://localhost:8080 + # database password, if used + password: + # database user, if used + user: + # database name, if used + database: + # enable/disable basic auth + basicAuth: + # basic auth username + basicAuthUser: + # basic auth password + basicAuthPassword: + # enable/disable with credentials headers + withCredentials: + # mark as default datasource. Max one per org + isDefault: + # fields that will be converted to json and stored in json_data + jsonData: + graphiteVersion: "1.1" + tlsAuth: true + tlsAuthWithCACert: true + # json object of data that will be encrypted. + secureJsonData: + tlsCACert: "..." + tlsClientCert: "..." + tlsClientKey: "..." + version: 1 + # allow users to edit datasources from the UI. + editable: false +``` + +## Sidecar for notifiers + +If the parameter `sidecar.notifiers.enabled` is set, an init container is deployed in the grafana +pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and +filters out the ones with a label as defined in `sidecar.notifiers.label`. The files defined in +those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, +the notification channels in grafana can be imported. The secrets must be created before +`helm install` so that the notifiers init container can list the secrets. + +Secrets are recommended over configmaps for this usecase because alert notification channels usually contain +private data like SMTP usernames and passwords. Secrets are the more appropriate cluster resource to manage those. + +Example datasource config adapted from [Grafana](https://grafana.com/docs/grafana/latest/administration/provisioning/#alert-notification-channels): + +```yaml +notifiers: + - name: notification-channel-1 + type: slack + uid: notifier1 + # either + org_id: 2 + # or + org_name: Main Org. 
+ is_default: true + send_reminder: true + frequency: 1h + disable_resolve_message: false + # See `Supported Settings` section for settings supporter for each + # alert notification type. + settings: + recipient: 'XXX' + token: 'xoxb' + uploadImage: true + url: https://slack.com + +delete_notifiers: + - name: notification-channel-1 + uid: notifier1 + org_id: 2 + - name: notification-channel-2 + # default org_id: 1 +``` + +## How to serve Grafana with a path prefix (/grafana) + +In order to serve Grafana with a prefix (e.g., ), add the following to your values.yaml. + +```yaml +ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/use-regex: "true" + + path: /grafana/?(.*) + hosts: + - k8s.example.dev + +grafana.ini: + server: + root_url: http://localhost:3000/grafana # this host can be localhost +``` + +## How to securely reference secrets in grafana.ini + +This example uses Grafana uses [file providers](https://grafana.com/docs/grafana/latest/administration/configuration/#file-provider) for secret values and the `extraSecretMounts` configuration flag (Additional grafana server secret mounts) to mount the secrets. 
+ +In grafana.ini: + +```yaml +grafana.ini: + [auth.generic_oauth] + enabled = true + client_id = $__file{/etc/secrets/auth_generic_oauth/client_id} + client_secret = $__file{/etc/secrets/auth_generic_oauth/client_secret} +``` + +Existing secret, or created along with helm: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: auth-generic-oauth-secret +type: Opaque +stringData: + client_id: + client_secret: +``` + +Include in the `extraSecretMounts` configuration flag: + +```yaml +- extraSecretMounts: + - name: auth-generic-oauth-secret-mount + secretName: auth-generic-oauth-secret + defaultMode: 0440 + mountPath: /etc/secrets/auth_generic_oauth + readOnly: true +``` + +### extraSecretMounts using a Container Storage Interface (CSI) provider + +This example uses a CSI driver e.g. retrieving secrets using [Azure Key Vault Provider](https://github.com/Azure/secrets-store-csi-driver-provider-azure) + +```yaml +- extraSecretMounts: + - name: secrets-store-inline + mountPath: /run/secrets + readOnly: true + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: "my-provider" + nodePublishSecretRef: + name: akv-creds +``` + +## Image Renderer Plug-In + +This chart supports enabling [remote image rendering](https://github.com/grafana/grafana-image-renderer/blob/master/docs/remote_rendering_using_docker.md) + +```yaml +imageRenderer: + enabled: true +``` + +### Image Renderer NetworkPolicy + +By default the image-renderer pods will have a network policy which only allows ingress traffic from the created grafana instance diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/dashboards/custom-dashboard.json b/charts/rancher-grafana/rancher-grafana/6.6.401/dashboards/custom-dashboard.json new file mode 100755 index 000000000..9e26dfeeb --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/dashboards/custom-dashboard.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git 
a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/NOTES.txt b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/NOTES.txt new file mode 100755 index 000000000..1fc8436d9 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/NOTES.txt @@ -0,0 +1,54 @@ +1. Get your '{{ .Values.adminUser }}' user password by running: + + kubectl get secret --namespace {{ template "grafana.namespace" . }} {{ template "grafana.fullname" . }} -o jsonpath="{.data.admin-password}" | base64 --decode ; echo + +2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: + + {{ template "grafana.fullname" . }}.{{ template "grafana.namespace" . }}.svc.cluster.local +{{ if .Values.ingress.enabled }} + If you bind grafana to 80, please update values in values.yaml and reinstall: + ``` + securityContext: + runAsUser: 0 + runAsGroup: 0 + fsGroup: 0 + + command: + - "setcap" + - "'cap_net_bind_service=+ep'" + - "/usr/sbin/grafana-server &&" + - "sh" + - "/run.sh" + ``` + Details refer to https://grafana.com/docs/installation/configuration/#http-port. + Or grafana would always crash. + + From outside the cluster, the server URL(s) are: +{{- range .Values.ingress.hosts }} + http://{{ . }} +{{- end }} +{{ else }} + Get the Grafana URL to visit by running these commands in the same shell: +{{ if contains "NodePort" .Values.service.type -}} + export NODE_PORT=$(kubectl get --namespace {{ template "grafana.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ template "grafana.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{ else if contains "LoadBalancer" .Values.service.type -}} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ You can watch the status of by running 'kubectl get svc --namespace {{ template "grafana.namespace" . }} -w {{ template "grafana.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ template "grafana.namespace" . }} {{ template "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + http://$SERVICE_IP:{{ .Values.service.port -}} +{{ else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ template "grafana.namespace" . }} -l "app.kubernetes.io/name={{ template "grafana.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ template "grafana.namespace" . }} port-forward $POD_NAME 3000 +{{- end }} +{{- end }} + +3. Login with the password from step 1 and the username: {{ .Values.adminUser }} + +{{- if not .Values.persistence.enabled }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the Grafana pod is terminated. 
##### +################################################################################# +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/_helpers.tpl b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/_helpers.tpl new file mode 100755 index 000000000..76ad78876 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/_helpers.tpl @@ -0,0 +1,145 @@ +# Rancher +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "grafana.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "grafana.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "grafana.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account +*/}} +{{- define "grafana.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "grafana.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "grafana.serviceAccountNameTest" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (print (include "grafana.fullname" .) "-test") .Values.serviceAccount.nameTest }} +{{- else -}} + {{ default "default" .Values.serviceAccount.nameTest }} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "grafana.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "grafana.labels" -}} +helm.sh/chart: {{ include "grafana.chart" . }} +{{ include "grafana.selectorLabels" . 
}} +{{- if or .Chart.AppVersion .Values.image.tag }} +app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.extraLabels }} +{{ toYaml .Values.extraLabels }} +{{- end }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "grafana.selectorLabels" -}} +app.kubernetes.io/name: {{ include "grafana.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "grafana.imageRenderer.labels" -}} +helm.sh/chart: {{ include "grafana.chart" . }} +{{ include "grafana.imageRenderer.selectorLabels" . }} +{{- if or .Chart.AppVersion .Values.image.tag }} +app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels ImageRenderer +*/}} +{{- define "grafana.imageRenderer.selectorLabels" -}} +app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Return the appropriate apiVersion for rbac. +*/}} +{{- define "rbac.apiVersion" -}} +{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/_pod.tpl b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/_pod.tpl new file mode 100755 index 000000000..2ba9f115c --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/_pod.tpl @@ -0,0 +1,496 @@ +{{- define "grafana.pod" -}} +{{- if .Values.schedulerName }} +schedulerName: "{{ .Values.schedulerName }}" +{{- end }} +serviceAccountName: {{ template "grafana.serviceAccountName" . 
}} +{{- if .Values.securityContext }} +securityContext: +{{ toYaml .Values.securityContext | indent 2 }} +{{- end }} +{{- if .Values.hostAliases }} +hostAliases: +{{ toYaml .Values.hostAliases | indent 2 }} +{{- end }} +{{- if .Values.priorityClassName }} +priorityClassName: {{ .Values.priorityClassName }} +{{- end }} +{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.sidecar.datasources.enabled .Values.sidecar.notifiers.enabled .Values.extraInitContainers) }} +initContainers: +{{- end }} +{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }} + - name: init-chown-data + {{- if .Values.initChownData.image.sha }} + image: "{{ template "system_default_registry" . }}{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}@sha256:{{ .Values.initChownData.image.sha }}" + {{- else }} + image: "{{ template "system_default_registry" . }}{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }} + securityContext: + runAsNonRoot: false + runAsUser: 0 + command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsGroup }}", "/var/lib/grafana"] + resources: +{{ toYaml .Values.initChownData.resources | indent 6 }} + volumeMounts: + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} +{{- end }} +{{- if .Values.dashboards }} + - name: download-dashboards + {{- if .Values.downloadDashboardsImage.sha }} + image: "{{ template "system_default_registry" . }}{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}@sha256:{{ .Values.downloadDashboardsImage.sha }}" + {{- else }} + image: "{{ template "system_default_registry" . 
}}{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} + command: ["/bin/sh"] + args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh /etc/grafana/download_dashboards.sh" ] + resources: +{{ toYaml .Values.downloadDashboards.resources | indent 6 }} + env: +{{- range $key, $value := .Values.downloadDashboards.env }} + - name: "{{ $key }}" + value: "{{ $value }}" +{{- end }} +{{- if .Values.downloadDashboards.envFromSecret }} + envFrom: + - secretRef: + name: {{ tpl .Values.downloadDashboards.envFromSecret . }} +{{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/download_dashboards.sh" + subPath: download_dashboards.sh + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} +{{- end }} +{{- if .Values.sidecar.datasources.enabled }} + - name: {{ template "grafana.name" . }}-sc-datasources + {{- if .Values.sidecar.image.sha }} + image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + {{- if .Values.sidecar.datasources.envFromSecret }} + envFrom: + - secretRef: + name: {{ tpl .Values.sidecar.datasources.envFromSecret . 
}} + {{- end }} + env: + - name: METHOD + value: LIST + - name: LABEL + value: "{{ .Values.sidecar.datasources.label }}" + {{- if .Values.sidecar.datasources.labelValue }} + - name: LABEL_VALUE + value: {{ quote .Values.sidecar.datasources.labelValue }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/datasources" + - name: RESOURCE + value: "both" + {{- if .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- if .Values.sidecar.datasources.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.sidecar.datasources.searchNamespace }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + resources: +{{ toYaml .Values.sidecar.resources | indent 6 }} + volumeMounts: + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} +{{- if .Values.sidecar.notifiers.enabled }} + - name: {{ template "grafana.name" . }}-sc-notifiers + {{- if .Values.sidecar.image.sha }} + image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ template "system_default_registry" . 
}}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + - name: METHOD + value: LIST + - name: LABEL + value: "{{ .Values.sidecar.notifiers.label }}" + - name: FOLDER + value: "/etc/grafana/provisioning/notifiers" + - name: RESOURCE + value: "both" + {{- if .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- if .Values.sidecar.notifiers.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.sidecar.notifiers.searchNamespace }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + resources: +{{ toYaml .Values.sidecar.resources | indent 6 }} + volumeMounts: + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" +{{- end}} +{{- if .Values.extraInitContainers }} +{{ toYaml .Values.extraInitContainers | indent 2 }} +{{- end }} +{{- if .Values.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end}} +{{- end }} +containers: +{{- if .Values.sidecar.dashboards.enabled }} + - name: {{ template "grafana.name" . }}-sc-dashboard + {{- if .Values.sidecar.image.sha }} + image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ template "system_default_registry" . 
}}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + - name: METHOD + value: {{ .Values.sidecar.dashboards.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.dashboards.label }}" + {{- if .Values.sidecar.dashboards.labelValue }} + - name: LABEL_VALUE + value: {{ quote .Values.sidecar.dashboards.labelValue }} + {{- end }} + - name: FOLDER + value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}" + - name: RESOURCE + value: "both" + {{- if .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- if .Values.sidecar.dashboards.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.sidecar.dashboards.searchNamespace }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + {{- if .Values.sidecar.dashboards.folderAnnotation }} + - name: FOLDER_ANNOTATION + value: "{{ .Values.sidecar.dashboards.folderAnnotation }}" + {{- end }} + resources: +{{ toYaml .Values.sidecar.resources | indent 6 }} + volumeMounts: + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} +{{- end}} + - name: {{ .Chart.Name }} + {{- if .Values.image.sha }} + image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}@sha256:{{ .Values.image.sha }}" + {{- else }} + image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.command }} + command: + {{- range .Values.command }} + - {{ . 
}} + {{- end }} + {{- end}} +{{- if .Values.containerSecurityContext }} + securityContext: +{{- toYaml .Values.containerSecurityContext | nindent 6 }} +{{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/grafana.ini" + subPath: grafana.ini + {{- if .Values.ldap.enabled }} + - name: ldap + mountPath: "/etc/grafana/ldap.toml" + subPath: ldap.toml + {{- end }} + {{- range .Values.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} +{{- if .Values.dashboards }} +{{- range $provider, $dashboards := .Values.dashboards }} +{{- range $key, $value := $dashboards }} +{{- if (or (hasKey $value "json") (hasKey $value "file")) }} + - name: dashboards-{{ $provider }} + mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" + subPath: "{{ $key }}.json" +{{- end }} +{{- end }} +{{- end }} +{{- end -}} +{{- if .Values.dashboardsConfigMaps }} +{{- range (keys .Values.dashboardsConfigMaps | sortAlpha) }} + - name: dashboards-{{ . }} + mountPath: "/var/lib/grafana/dashboards/{{ . 
}}" +{{- end }} +{{- end }} +{{- if .Values.datasources }} + - name: config + mountPath: "/etc/grafana/provisioning/datasources/datasources.yaml" + subPath: datasources.yaml +{{- end }} +{{- if .Values.notifiers }} + - name: config + mountPath: "/etc/grafana/provisioning/notifiers/notifiers.yaml" + subPath: notifiers.yaml +{{- end }} +{{- if .Values.dashboardProviders }} + - name: config + mountPath: "/etc/grafana/provisioning/dashboards/dashboardproviders.yaml" + subPath: dashboardproviders.yaml +{{- end }} +{{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} +{{ if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" + subPath: provider.yaml +{{- end}} +{{- end}} +{{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} +{{- if .Values.sidecar.notifiers.enabled }} + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" +{{- end}} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + subPath: {{ .subPath | default "" }} + {{- end }} + {{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + {{- end }} + ports: + - name: {{ .Values.service.portName }} + containerPort: {{ .Values.service.targetPort }} + protocol: TCP + - name: {{ .Values.podPortName }} + containerPort: 3000 + protocol: TCP + env: + {{- if not .Values.env.GF_SECURITY_ADMIN_USER }} + - name: GF_SECURITY_ADMIN_USER + valueFrom: + secretKeyRef: + name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) 
}} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) }} + - name: GF_SECURITY_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if .Values.plugins }} + - name: GF_INSTALL_PLUGINS + valueFrom: + configMapKeyRef: + name: {{ template "grafana.fullname" . }} + key: plugins + {{- end }} + {{- if .Values.smtp.existingSecret }} + - name: GF_SMTP_USER + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.userKey | default "user" }} + - name: GF_SMTP_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.passwordKey | default "password" }} + {{- end }} + {{ if .Values.imageRenderer.enabled }} + - name: GF_RENDERING_SERVER_URL + value: http://{{ template "grafana.fullname" . }}-image-renderer.{{ template "grafana.namespace" . }}:{{ .Values.imageRenderer.service.port }}/render + - name: GF_RENDERING_CALLBACK_URL + value: http://{{ template "grafana.fullname" . }}.{{ template "grafana.namespace" . }}:{{ .Values.service.port }}/{{ .Values.imageRenderer.grafanaSubPath }} + {{ end }} + {{- range $key, $value := .Values.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: +{{ toYaml $value | indent 10 }} + {{- end }} +{{- range $key, $value := .Values.env }} + - name: "{{ tpl $key $ }}" + value: "{{ tpl (print $value) $ }}" +{{- end }} + {{- if .Values.envFromSecret }} + envFrom: + - secretRef: + name: {{ tpl .Values.envFromSecret . }} + {{- end }} + {{- if .Values.envRenderSecret }} + envFrom: + - secretRef: + name: {{ template "grafana.fullname" . 
}}-env + {{- end }} + livenessProbe: +{{ toYaml .Values.livenessProbe | indent 6 }} + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 6 }} + resources: +{{ toYaml .Values.resources | indent 6 }} +{{- with .Values.extraContainers }} +{{ tpl . $ | indent 2 }} +{{- end }} +nodeSelector: {{ include "linux-node-selector" . | nindent 2 }} +{{- if .Values.nodeSelector }} +{{ toYaml .Values.nodeSelector | indent 2 }} +{{- end }} +{{- with .Values.affinity }} +affinity: +{{ toYaml . | indent 2 }} +{{- end }} +tolerations: {{ include "linux-node-tolerations" . | nindent 2 }} +{{- if .Values.tolerations }} +{{ toYaml .Values.tolerations | indent 2 }} +{{- end }} +volumes: + - name: config + configMap: + name: {{ template "grafana.fullname" . }} +{{- range .Values.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} +{{- end }} + {{- if .Values.dashboards }} + {{- range (keys .Values.dashboards | sortAlpha) }} + - name: dashboards-{{ . }} + configMap: + name: {{ template "grafana.fullname" $ }}-dashboards-{{ . }} + {{- end }} + {{- end }} + {{- if .Values.dashboardsConfigMaps }} + {{ $root := . }} + {{- range $provider, $name := .Values.dashboardsConfigMaps }} + - name: dashboards-{{ $provider }} + configMap: + name: {{ tpl $name $root }} + {{- end }} + {{- end }} + {{- if .Values.ldap.enabled }} + - name: ldap + secret: + {{- if .Values.ldap.existingSecret }} + secretName: {{ .Values.ldap.existingSecret }} + {{- else }} + secretName: {{ template "grafana.fullname" . }} + {{- end }} + items: + - key: ldap-toml + path: ldap.toml + {{- end }} +{{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }} + - name: storage + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "grafana.fullname" .) 
}} +{{- else if and .Values.persistence.enabled (eq .Values.persistence.type "statefulset") }} +# nothing +{{- else }} + - name: storage +{{- if .Values.persistence.inMemory.enabled }} + emptyDir: + medium: Memory +{{- if .Values.persistence.inMemory.sizeLimit }} + sizeLimit: {{ .Values.persistence.inMemory.sizeLimit }} +{{- end -}} +{{- else }} + emptyDir: {} +{{- end -}} +{{- end -}} +{{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + emptyDir: {} +{{- if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + configMap: + name: {{ template "grafana.fullname" . }}-config-dashboards +{{- end }} +{{- end }} +{{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + emptyDir: {} +{{- end -}} +{{- if .Values.sidecar.notifiers.enabled }} + - name: sc-notifiers-volume + emptyDir: {} +{{- end -}} +{{- range .Values.extraSecretMounts }} +{{- if .secretName }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + defaultMode: {{ .defaultMode }} +{{- else if .projected }} + - name: {{ .name }} + projected: {{- toYaml .projected | nindent 6 }} +{{- else if .csi }} + - name: {{ .name }} + csi: {{- toYaml .csi | nindent 6 }} +{{- end }} +{{- end }} +{{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + persistentVolumeClaim: + claimName: {{ .existingClaim }} +{{- end }} +{{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + emptyDir: {} +{{- end -}} +{{- if .Values.extraContainerVolumes }} +{{ toYaml .Values.extraContainerVolumes | indent 2 }} +{{- end }} +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/clusterrole.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/clusterrole.yaml new file mode 100755 index 000000000..f09e06563 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/clusterrole.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.rbac.create (not .Values.rbac.namespaced) (not .Values.rbac.useExistingRole) 
}} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + name: {{ template "grafana.fullname" . }}-clusterrole +{{- if or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraClusterRoleRules) }} +rules: +{{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled }} +- apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +{{- end}} +{{- with .Values.rbac.extraClusterRoleRules }} +{{ toYaml . | indent 0 }} +{{- end}} +{{- else }} +rules: [] +{{- end}} +{{- end}} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/clusterrolebinding.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/clusterrolebinding.yaml new file mode 100755 index 000000000..4accbfac0 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/clusterrolebinding.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "grafana.fullname" . }}-clusterrolebinding + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: {{ template "grafana.serviceAccountName" . }} + namespace: {{ template "grafana.namespace" . }} +roleRef: + kind: ClusterRole +{{- if (not .Values.rbac.useExistingRole) }} + name: {{ template "grafana.fullname" . 
}}-clusterrole +{{- else }} + name: {{ .Values.rbac.useExistingRole }} +{{- end }} + apiGroup: rbac.authorization.k8s.io +{{- end -}} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/configmap-dashboard-provider.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/configmap-dashboard-provider.yaml new file mode 100755 index 000000000..65d73858e --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/configmap-dashboard-provider.yaml @@ -0,0 +1,29 @@ +{{- if .Values.sidecar.dashboards.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + name: {{ template "grafana.fullname" . }}-config-dashboards + namespace: {{ template "grafana.namespace" . }} +data: + provider.yaml: |- + apiVersion: 1 + providers: + - name: '{{ .Values.sidecar.dashboards.provider.name }}' + orgId: {{ .Values.sidecar.dashboards.provider.orgid }} + {{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} + folder: '{{ .Values.sidecar.dashboards.provider.folder }}' + {{- end}} + type: {{ .Values.sidecar.dashboards.provider.type }} + disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }} + allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }} + updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }} + options: + foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} + path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . 
}}{{- end }} +{{- end}} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/configmap.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/configmap.yaml new file mode 100755 index 000000000..de32b7ab2 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/configmap.yaml @@ -0,0 +1,80 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +data: +{{- if .Values.plugins }} + plugins: {{ join "," .Values.plugins }} +{{- end }} + grafana.ini: | +{{- range $key, $value := index .Values "grafana.ini" }} + [{{ $key }}] + {{- range $elem, $elemVal := $value }} + {{- if kindIs "invalid" $elemVal }} + {{ $elem }} = + {{- else }} + {{ $elem }} = {{ tpl (toYaml $elemVal) $ }} + {{- end }} + {{- end }} +{{- end }} + +{{- if .Values.datasources }} +{{ $root := . 
}} + {{- range $key, $value := .Values.datasources }} + {{ $key }}: | +{{ tpl (toYaml $value | indent 4) $root }} + {{- end -}} +{{- end -}} + +{{- if .Values.notifiers }} + {{- range $key, $value := .Values.notifiers }} + {{ $key }}: | +{{ toYaml $value | indent 4 }} + {{- end -}} +{{- end -}} + +{{- if .Values.dashboardProviders }} + {{- range $key, $value := .Values.dashboardProviders }} + {{ $key }}: | +{{ toYaml $value | indent 4 }} + {{- end -}} +{{- end -}} + +{{- if .Values.dashboards }} + download_dashboards.sh: | + #!/usr/bin/env sh + set -euf + {{- if .Values.dashboardProviders }} + {{- range $key, $value := .Values.dashboardProviders }} + {{- range $value.providers }} + mkdir -p {{ .options.path }} + {{- end }} + {{- end }} + {{- end }} + + {{- range $provider, $dashboards := .Values.dashboards }} + {{- range $key, $value := $dashboards }} + {{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }} + curl -skf \ + --connect-timeout 60 \ + --max-time 60 \ + {{- if not $value.b64content }} + -H "Accept: application/json" \ + {{- if $value.token }} + -H "Authorization: token {{ $value.token }}" \ + {{- end }} + -H "Content-Type: application/json;charset=UTF-8" \ + {{ end }} + {{- if $value.url -}}"{{ $value.url }}"{{- else -}}"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download"{{- end -}}{{ if $value.datasource }} | sed '/-- .* --/! 
s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g'{{ end }}{{- if $value.b64content -}} | base64 -d {{- end -}} \ + > "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" + {{- end -}} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/dashboards-json-configmap.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/dashboards-json-configmap.yaml new file mode 100755 index 000000000..59e0be641 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/dashboards-json-configmap.yaml @@ -0,0 +1,35 @@ +{{- if .Values.dashboards }} +{{ $files := .Files }} +{{- range $provider, $dashboards := .Values.dashboards }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "grafana.fullname" $ }}-dashboards-{{ $provider }} + namespace: {{ template "grafana.namespace" $ }} + labels: + {{- include "grafana.labels" $ | nindent 4 }} + dashboard-provider: {{ $provider }} +{{- if $dashboards }} +data: +{{- $dashboardFound := false }} +{{- range $key, $value := $dashboards }} +{{- if (or (hasKey $value "json") (hasKey $value "file")) }} +{{- $dashboardFound = true }} +{{ print $key | indent 2 }}.json: +{{- if hasKey $value "json" }} + |- +{{ $value.json | indent 6 }} +{{- end }} +{{- if hasKey $value "file" }} +{{ toYaml ( $files.Get $value.file ) | indent 4}} +{{- end }} +{{- end }} +{{- end }} +{{- if not $dashboardFound }} + {} +{{- end }} +{{- end }} +--- +{{- end }} + +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/deployment.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/deployment.yaml new file mode 100755 index 000000000..4d77794cd --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/deployment.yaml @@ -0,0 +1,48 @@ +{{ if (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc")) }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "grafana.fullname" . 
}} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- if .Values.labels }} +{{ toYaml .Values.labels | indent 4 }} +{{- end }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.replicas }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} +{{- with .Values.deploymentStrategy }} + strategy: +{{ toYaml . | trim | indent 4 }} +{{- end }} + template: + metadata: + labels: + {{- include "grafana.selectorLabels" . | nindent 8 }} +{{- with .Values.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} +{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} +{{- end }} +{{- if .Values.envRenderSecret }} + checksum/secret-env: {{ include (print $.Template.BasePath "/secret-env.yaml") . | sha256sum }} +{{- end }} +{{- with .Values.podAnnotations }} +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- include "grafana.pod" . 
| nindent 6 }} +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/headless-service.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/headless-service.yaml new file mode 100755 index 000000000..2fa816e04 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/headless-service.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset")}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "grafana.fullname" . }}-headless + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + clusterIP: None + selector: + {{- include "grafana.selectorLabels" . | nindent 4 }} + type: ClusterIP +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/image-renderer-deployment.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/image-renderer-deployment.yaml new file mode 100755 index 000000000..d17b9dfed --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/image-renderer-deployment.yaml @@ -0,0 +1,117 @@ +{{ if .Values.imageRenderer.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "grafana.fullname" . }}-image-renderer + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} +{{- if .Values.imageRenderer.labels }} +{{ toYaml .Values.imageRenderer.labels | indent 4 }} +{{- end }} +{{- with .Values.imageRenderer.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.imageRenderer.replicas }} + revisionHistoryLimit: {{ .Values.imageRenderer.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . 
| nindent 6 }} +{{- with .Values.imageRenderer.deploymentStrategy }} + strategy: +{{ toYaml . | trim | indent 4 }} +{{- end }} + template: + metadata: + labels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 8 }} +{{- with .Values.imageRenderer.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} +{{- with .Values.imageRenderer.podAnnotations }} +{{ toYaml . | indent 8 }} +{{- end }} + spec: + + {{- if .Values.imageRenderer.schedulerName }} + schedulerName: "{{ .Values.imageRenderer.schedulerName }}" + {{- end }} + {{- if .Values.imageRenderer.serviceAccountName }} + serviceAccountName: "{{ .Values.imageRenderer.serviceAccountName }}" + {{- else }} + serviceAccountName: {{ template "grafana.serviceAccountName" . }} + {{- end }} + {{- if .Values.imageRenderer.securityContext }} + securityContext: + {{ toYaml .Values.imageRenderer.securityContext | indent 2 }} + {{- end }} + {{- if .Values.imageRenderer.hostAliases }} + hostAliases: + {{ toYaml .Values.imageRenderer.hostAliases | indent 2 }} + {{- end }} + {{- if .Values.imageRenderer.priorityClassName }} + priorityClassName: {{ .Values.imageRenderer.priorityClassName }} + {{- end }} + {{- if .Values.imageRenderer.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.imageRenderer.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ .Chart.Name }}-image-renderer + {{- if .Values.imageRenderer.image.sha }} + image: "{{ template "system_default_registry" . }}{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}@sha256:{{ .Values.imageRenderer.image.sha }}" + {{- else }} + image: "{{ template "system_default_registry" . 
}}{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.imageRenderer.image.pullPolicy }} + {{- if .Values.imageRenderer.command }} + command: + {{- range .Values.imageRenderer.command }} + - {{ . }} + {{- end }} + {{- end}} + ports: + - name: {{ .Values.imageRenderer.service.portName }} + containerPort: {{ .Values.imageRenderer.service.port }} + protocol: TCP + env: + - name: HTTP_PORT + value: {{ .Values.imageRenderer.service.port | quote }} + {{- range $key, $value := .Values.imageRenderer.env }} + - name: {{ $key | quote }} + value: {{ $value | quote }} + {{- end }} + securityContext: + capabilities: + drop: ['all'] + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /tmp + name: image-renderer-tmpfs + {{- with .Values.imageRenderer.resources }} + resources: +{{ toYaml . | indent 12 }} + {{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} + {{- if .Values.imageRenderer.nodeSelector }} +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.imageRenderer.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} + {{- if .Values.imageRenderer.tolerations }} +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: image-renderer-tmpfs + emptyDir: {} +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/image-renderer-network-policy.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/image-renderer-network-policy.yaml new file mode 100755 index 000000000..f8ca73aab --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/image-renderer-network-policy.yaml @@ -0,0 +1,76 @@ +{{- if and (.Values.imageRenderer.enabled) (.Values.imageRenderer.networkPolicy.limitIngress) }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "grafana.fullname" . 
}}-image-renderer-ingress + namespace: {{ template "grafana.namespace" . }} + annotations: + comment: Limit image-renderer ingress traffic from grafana +spec: + podSelector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + {{- if .Values.imageRenderer.podLabels }} + {{ toYaml .Values.imageRenderer.podLabels | nindent 6 }} + {{- end }} + + policyTypes: + - Ingress + ingress: + - ports: + - port: {{ .Values.imageRenderer.service.port }} + protocol: TCP + from: + - namespaceSelector: + matchLabels: + name: {{ template "grafana.namespace" . }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 14 }} + {{- if .Values.podLabels }} + {{ toYaml .Values.podLabels | nindent 14 }} + {{- end }} +{{ end }} + +{{- if and (.Values.imageRenderer.enabled) (.Values.imageRenderer.networkPolicy.limitEgress) }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "grafana.fullname" . }}-image-renderer-egress + namespace: {{ template "grafana.namespace" . }} + annotations: + comment: Limit image-renderer egress traffic to grafana +spec: + podSelector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + {{- if .Values.imageRenderer.podLabels }} + {{ toYaml .Values.imageRenderer.podLabels | nindent 6 }} + {{- end }} + + policyTypes: + - Egress + egress: + # allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # talk only to grafana + - ports: + - port: {{ .Values.service.port }} + protocol: TCP + to: + - namespaceSelector: + matchLabels: + name: {{ template "grafana.namespace" . }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . 
| nindent 14 }} + {{- if .Values.podLabels }} + {{ toYaml .Values.podLabels | nindent 14 }} + {{- end }} +{{ end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/image-renderer-service.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/image-renderer-service.yaml new file mode 100755 index 000000000..f5d3eb02f --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/image-renderer-service.yaml @@ -0,0 +1,28 @@ +{{ if .Values.imageRenderer.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "grafana.fullname" . }}-image-renderer + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} +{{- if .Values.imageRenderer.service.labels }} +{{ toYaml .Values.imageRenderer.service.labels | indent 4 }} +{{- end }} +{{- with .Values.imageRenderer.service.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + type: ClusterIP + {{- if .Values.imageRenderer.service.clusterIP }} + clusterIP: {{ .Values.imageRenderer.service.clusterIP }} + {{end}} + ports: + - name: {{ .Values.imageRenderer.service.portName }} + port: {{ .Values.imageRenderer.service.port }} + protocol: TCP + targetPort: {{ .Values.imageRenderer.service.targetPort }} + selector: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 4 }} +{{ end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/ingress.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/ingress.yaml new file mode 100755 index 000000000..44ebfc950 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/ingress.yaml @@ -0,0 +1,80 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "grafana.fullname" . 
-}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +{{- $ingressPathType := .Values.ingress.pathType -}} +{{- $extraPaths := .Values.ingress.extraPaths -}} +{{- $newAPI := .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} +{{- if $newAPI -}} +apiVersion: networking.k8s.io/v1 +{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- if .Values.ingress.labels }} +{{ toYaml .Values.ingress.labels | indent 4 }} +{{- end }} + {{- if .Values.ingress.annotations }} + annotations: + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} +spec: + {{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} + {{- end -}} +{{- if .Values.ingress.tls }} + tls: +{{ tpl (toYaml .Values.ingress.tls) $ | indent 4 }} +{{- end }} + rules: + {{- if .Values.ingress.hosts }} + {{- range .Values.ingress.hosts }} + - host: {{ tpl . 
$}} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: {{ $ingressPath }} + {{- if $newAPI }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $newAPI }} + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end }} + {{- else }} + - http: + paths: + - backend: + {{- if $newAPI }} + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + pathType: {{ $ingressPathType }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- if $ingressPath }} + path: {{ $ingressPath }} + {{- end }} + {{- end -}} +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/nginx-config.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/nginx-config.yaml new file mode 100755 index 000000000..f847c51ce --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/nginx-config.yaml @@ -0,0 +1,75 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-nginx-proxy-config + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . 
| nindent 4 }} +data: + nginx.conf: |- + worker_processes auto; + error_log /dev/stdout warn; + pid /var/cache/nginx/nginx.pid; + + events { + worker_connections 1024; + } + + http { + include /etc/nginx/mime.types; + log_format main '[$time_local - $status] $remote_addr - $remote_user $request ($http_referer)'; + + proxy_connect_timeout 10; + proxy_read_timeout 180; + proxy_send_timeout 5; + proxy_buffering off; + proxy_cache_path /var/cache/nginx/cache levels=1:2 keys_zone=my_zone:100m inactive=1d max_size=10g; + + server { + listen 8080; + access_log off; + + gzip on; + gzip_min_length 1k; + gzip_comp_level 2; + gzip_types text/plain application/javascript application/x-javascript text/css application/xml text/javascript image/jpeg image/gif image/png; + gzip_vary on; + gzip_disable "MSIE [1-6]\."; + + proxy_set_header Host $host; + + location /api/dashboards { + proxy_pass http://localhost:3000; + } + + location /api/search { + proxy_pass http://localhost:3000; + + sub_filter_types application/json; + sub_filter_once off; + sub_filter '"url":"/d' '"url":"d'; + } + + location / { + proxy_cache my_zone; + proxy_cache_valid 200 302 1d; + proxy_cache_valid 301 30d; + proxy_cache_valid any 5m; + proxy_cache_bypass $http_cache_control; + add_header X-Proxy-Cache $upstream_cache_status; + add_header Cache-Control "public"; + + proxy_pass http://localhost:3000/; + + sub_filter_types text/html; + sub_filter_once off; + sub_filter '"appSubUrl":""' '"appSubUrl":"."'; + sub_filter '"url":"/' '"url":"./'; + sub_filter ':"/avatar/' ':"avatar/'; + + if ($request_filename ~ .*\.(?:js|css|jpg|jpeg|gif|png|ico|cur|gz|svg|svgz|mp4|ogg|ogv|webm)$) { + expires 90d; + } + } + } + } diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/poddisruptionbudget.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/poddisruptionbudget.yaml new file mode 100755 index 000000000..61813a436 --- /dev/null +++ 
b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/poddisruptionbudget.yaml @@ -0,0 +1,22 @@ +{{- if .Values.podDisruptionBudget }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- if .Values.labels }} +{{ toYaml .Values.labels | indent 4 }} +{{- end }} +spec: +{{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} +{{- end }} +{{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} +{{- end }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/podsecuritypolicy.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/podsecuritypolicy.yaml new file mode 100755 index 000000000..19da50791 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/podsecuritypolicy.yaml @@ -0,0 +1,49 @@ +{{- if .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . 
| nindent 4 }} +{{- if .Values.rbac.pspAnnotations }} + annotations: {{ toYaml .Values.rbac.pspAnnotations | nindent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + # Default set from Docker, without DAC_OVERRIDE or CHOWN + - FOWNER + - FSETID + - KILL + - SETGID + - SETUID + - SETPCAP + - NET_BIND_SERVICE + - NET_RAW + - SYS_CHROOT + - MKNOD + - AUDIT_WRITE + - SETFCAP + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'csi' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/pvc.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/pvc.yaml new file mode 100755 index 000000000..8d93f5c23 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/pvc.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.persistence.annotations }} + annotations: +{{ toYaml . | indent 4 }} + {{- end }} + {{- with .Values.persistence.finalizers }} + finalizers: +{{ toYaml . | indent 4 }} + {{- end }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClassName }} + storageClassName: {{ .Values.persistence.storageClassName }} + {{- end -}} + {{- with .Values.persistence.selectorLabels }} + selector: + matchLabels: +{{ toYaml . | indent 6 }} + {{- end }} +{{- end -}} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/role.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/role.yaml new file mode 100755 index 000000000..54c3fb0b2 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/role.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.rbac.create (not .Values.rbac.useExistingRole) -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraRoleRules))) }} +rules: +{{- if .Values.rbac.pspEnabled }} +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "grafana.fullname" . }}] +{{- end }} +{{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled) }} +- apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +{{- end }} +{{- with .Values.rbac.extraRoleRules }} +{{ toYaml . 
| indent 0 }} +{{- end}} +{{- else }} +rules: [] +{{- end }} +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/rolebinding.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/rolebinding.yaml new file mode 100755 index 000000000..34f1ad6f8 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/rolebinding.yaml @@ -0,0 +1,25 @@ +{{- if .Values.rbac.create -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role +{{- if (not .Values.rbac.useExistingRole) }} + name: {{ template "grafana.fullname" . }} +{{- else }} + name: {{ .Values.rbac.useExistingRole }} +{{- end }} +subjects: +- kind: ServiceAccount + name: {{ template "grafana.serviceAccountName" . }} + namespace: {{ template "grafana.namespace" . }} +{{- end -}} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/secret-env.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/secret-env.yaml new file mode 100755 index 000000000..5c09313e6 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/secret-env.yaml @@ -0,0 +1,14 @@ +{{- if .Values.envRenderSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "grafana.fullname" . }}-env + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . 
| nindent 4 }} +type: Opaque +data: +{{- range $key, $val := .Values.envRenderSecret }} + {{ $key }}: {{ $val | b64enc | quote }} +{{- end -}} +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/secret.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/secret.yaml new file mode 100755 index 000000000..4fdd817da --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/secret.yaml @@ -0,0 +1,22 @@ +{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +type: Opaque +data: + {{- if and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }} + admin-user: {{ .Values.adminUser | b64enc | quote }} + {{- if .Values.adminPassword }} + admin-password: {{ .Values.adminPassword | b64enc | quote }} + {{- else }} + admin-password: {{ randAlphaNum 40 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if not .Values.ldap.existingSecret }} + ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/service.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/service.yaml new file mode 100755 index 000000000..276456698 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/service.yaml @@ -0,0 +1,50 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . 
| nindent 4 }} +{{- if .Values.service.labels }} +{{ toYaml .Values.service.labels | indent 4 }} +{{- end }} +{{- with .Values.service.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} + type: ClusterIP + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{end}} +{{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }} + {{- end -}} +{{- else }} + type: {{ .Values.service.type }} +{{- end }} +{{- if .Values.service.externalIPs }} + externalIPs: +{{ toYaml .Values.service.externalIPs | indent 4 }} +{{- end }} + ports: + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + protocol: TCP + targetPort: {{ .Values.service.targetPort }} +{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{.Values.service.nodePort}} +{{ end }} + {{- if .Values.extraExposePorts }} + {{- tpl (toYaml .Values.extraExposePorts) . | indent 4 }} + {{- end }} + selector: + {{- include "grafana.selectorLabels" . | nindent 4 }} + diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/serviceaccount.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/serviceaccount.yaml new file mode 100755 index 000000000..7576eeef0 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.serviceAccount.annotations }} + annotations: +{{ toYaml . 
| indent 4 }} +{{- end }} + name: {{ template "grafana.serviceAccountName" . }} + namespace: {{ template "grafana.namespace" . }} +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/servicemonitor.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/servicemonitor.yaml new file mode 100755 index 000000000..23288523f --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/servicemonitor.yaml @@ -0,0 +1,40 @@ +{{- if .Values.serviceMonitor.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "grafana.fullname" . }} + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ .Values.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- if .Values.serviceMonitor.labels }} + {{- toYaml .Values.serviceMonitor.labels | nindent 4 }} + {{- end }} +spec: + endpoints: + - interval: {{ .Values.serviceMonitor.interval }} + {{- if .Values.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }} + {{- end }} + honorLabels: true + port: {{ .Values.service.portName }} + path: {{ .Values.serviceMonitor.path }} + scheme: {{ .Values.serviceMonitor.scheme }} + {{- if .Values.serviceMonitor.tlsConfig }} + tlsConfig: + {{- toYaml .Values.serviceMonitor.tlsConfig | nindent 6 }} + {{- end }} + {{- if .Values.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + jobLabel: "{{ .Release.Name }}" + selector: + matchLabels: + {{- include "grafana.selectorLabels" . 
| nindent 8 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/statefulset.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/statefulset.yaml new file mode 100755 index 000000000..b2b4616f3 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/statefulset.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset")}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + serviceName: {{ template "grafana.fullname" . }}-headless + template: + metadata: + labels: + {{- include "grafana.selectorLabels" . | nindent 8 }} +{{- with .Values.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} + {{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} +{{- end }} +{{- with .Values.podAnnotations }} +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- include "grafana.pod" . 
| nindent 6 }} + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: {{ .Values.persistence.accessModes }} + storageClassName: {{ .Values.persistence.storageClassName }} + resources: + requests: + storage: {{ .Values.persistence.size }} + {{- with .Values.persistence.selectorLabels }} + selector: + matchLabels: +{{ toYaml . | indent 10 }} + {{- end }} +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-configmap.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-configmap.yaml new file mode 100755 index 000000000..ff53aaf1b --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-configmap.yaml @@ -0,0 +1,17 @@ +{{- if .Values.testFramework.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "grafana.fullname" . }}-test + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +data: + run.sh: |- + @test "Test Health" { + url="http://{{ template "grafana.fullname" . }}/api/health" + + code=$(wget --server-response --spider --timeout 10 --tries 1 ${url} 2>&1 | awk '/^ HTTP/{print $2}') + [ "$code" == "200" ] + } +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-podsecuritypolicy.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-podsecuritypolicy.yaml new file mode 100755 index 000000000..1acd65128 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-podsecuritypolicy.yaml @@ -0,0 +1,30 @@ +{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "grafana.fullname" . }}-test + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . 
| nindent 4 }} +spec: + allowPrivilegeEscalation: true + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + fsGroup: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + volumes: + - configMap + - downwardAPI + - emptyDir + - projected + - csi + - secret +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-role.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-role.yaml new file mode 100755 index 000000000..6b10677ae --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-role.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "grafana.fullname" . }}-test + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "grafana.fullname" . }}-test] +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-rolebinding.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-rolebinding.yaml new file mode 100755 index 000000000..58fa5e78b --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-rolebinding.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "grafana.fullname" . }}-test + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "grafana.fullname" . }}-test +subjects: +- kind: ServiceAccount + name: {{ template "grafana.serviceAccountNameTest" . 
}} + namespace: {{ template "grafana.namespace" . }} +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-serviceaccount.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-serviceaccount.yaml new file mode 100755 index 000000000..5c3350733 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test-serviceaccount.yaml @@ -0,0 +1,9 @@ +{{- if and .Values.testFramework.enabled .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + name: {{ template "grafana.serviceAccountNameTest" . }} + namespace: {{ template "grafana.namespace" . }} +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test.yaml new file mode 100755 index 000000000..cdc86e5f2 --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/templates/tests/test.yaml @@ -0,0 +1,48 @@ +{{- if .Values.testFramework.enabled }} +apiVersion: v1 +kind: Pod +metadata: + name: {{ template "grafana.fullname" . }}-test + labels: + {{- include "grafana.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test-success + namespace: {{ template "grafana.namespace" . }} +spec: + serviceAccountName: {{ template "grafana.serviceAccountNameTest" . }} + {{- if .Values.testFramework.securityContext }} + securityContext: {{ toYaml .Values.testFramework.securityContext | nindent 4 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 4 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 4 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . 
| indent 4 }} + {{- end }} + containers: + - name: {{ .Release.Name }}-test + image: "{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}" + imagePullPolicy: "{{ .Values.testFramework.imagePullPolicy}}" + command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"] + volumeMounts: + - mountPath: /tests + name: tests + readOnly: true + volumes: + - name: tests + configMap: + name: {{ template "grafana.fullname" . }}-test + restartPolicy: Never +{{- end }} diff --git a/charts/rancher-grafana/rancher-grafana/6.6.401/values.yaml b/charts/rancher-grafana/rancher-grafana/6.6.401/values.yaml new file mode 100755 index 000000000..9491c1a1f --- /dev/null +++ b/charts/rancher-grafana/rancher-grafana/6.6.401/values.yaml @@ -0,0 +1,732 @@ +global: + cattle: + systemDefaultRegistry: "" + +autoscaling: + enabled: false +rbac: + create: true + ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true) + # useExistingRole: name-of-some-(cluster)role + pspEnabled: true + pspAnnotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + namespaced: false + extraRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] + extraClusterRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] +serviceAccount: + create: true + name: + nameTest: +# annotations: +# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here + +replicas: 1 + +## See `kubectl explain 
poddisruptionbudget.spec` for more +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} +# minAvailable: 1 +# maxUnavailable: 1 + +## See `kubectl explain deployment.spec.strategy` for more +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy +deploymentStrategy: + type: RollingUpdate + +readinessProbe: + httpGet: + path: /api/health + port: 3000 + +livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 60 + timeoutSeconds: 30 + failureThreshold: 10 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: "default-scheduler" + +image: + repository: rancher/mirrored-grafana-grafana + tag: 7.4.5 + sha: "" + pullPolicy: IfNotPresent + + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +testFramework: + enabled: true + image: "rancher/mirrored-bats-bats" + tag: "v1.1.0" + imagePullPolicy: IfNotPresent + securityContext: + runAsNonRoot: true + runAsUser: 1000 + +securityContext: + runAsNonRoot: true + runAsUser: 472 + runAsGroup: 472 + fsGroup: 472 + +containerSecurityContext: + {} + +extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /etc/grafana/ssl/ + # subPath: certificates.crt # (optional) + # configMap: certs-configmap + # readOnly: true + + +extraEmptyDirMounts: [] + # - name: provisioning-notifiers + # mountPath: /etc/grafana/provisioning/notifiers + + +# Apply extra labels to common labels. 
+extraLabels: {} + +## Assign a PriorityClassName to pods if set +# priorityClassName: + +downloadDashboardsImage: + repository: rancher/mirrored-curlimages-curl + tag: 7.73.0 + sha: "" + pullPolicy: IfNotPresent + +downloadDashboards: + env: {} + envFromSecret: "" + resources: {} + +## Pod Annotations +# podAnnotations: {} + +## Pod Labels +# podLabels: {} + +podPortName: grafana + +## Deployment annotations +# annotations: {} + +## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. +## ref: http://kubernetes.io/docs/user-guide/services/ +## +service: + type: ClusterIP + port: 80 + targetPort: 3000 + # targetPort: 4181 To be used with a proxy extraContainer + annotations: {} + labels: {} + portName: service + +serviceMonitor: + ## If true, a ServiceMonitor CRD is created for a prometheus operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + path: /metrics + # namespace: monitoring (defaults to use the namespace this chart is deployed to) + labels: {} + interval: 1m + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + relabelings: [] + +extraExposePorts: [] + # - name: keycloak + # port: 8080 + # targetPort: 8080 + # type: ClusterIP + +# overrides pod.spec.hostAliases in the grafana deployment's pods +hostAliases: [] + # - ip: "1.2.3.4" + # hostnames: + # - "my.host.com" + +ingress: + enabled: false + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + # Values can be templated + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: / + + # pathType is only for k8s > 1.19 + pathType: Prefix + + hosts: + - chart-example.local + 
## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. + extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + ## Or for k8s > 1.19 + # - path: /* + # pathType: Prefix + # backend: + # service: + # name: ssl-redirect + # port: + # name: service + + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi + +## Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +# +nodeSelector: {} + +## Tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Affinity for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +extraInitContainers: [] + +## Enable an Specify container in extraContainers. 
This is meant to allow adding an authentication proxy to a grafana pod +extraContainers: | +# - name: proxy +# image: quay.io/gambol99/keycloak-proxy:latest +# args: +# - -provider=github +# - -client-id= +# - -client-secret= +# - -github-org= +# - -email-domain=* +# - -cookie-secret= +# - -http-address=http://0.0.0.0:4181 +# - -upstream-url=http://127.0.0.1:3000 +# ports: +# - name: proxy-web +# containerPort: 4181 + +## Volumes that can be used in init containers that will not be mounted to deployment pods +extraContainerVolumes: [] +# - name: volume-from-secret +# secret: +# secretName: secret-to-mount +# - name: empty-dir-volume +# emptyDir: {} + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + type: pvc + enabled: false + # storageClassName: default + accessModes: + - ReadWriteOnce + size: 10Gi + # annotations: {} + finalizers: + - kubernetes.io/pvc-protection + # selectorLabels: {} + # subPath: "" + # existingClaim: + + ## If persistence is not enabled, this allows to mount the + ## local storage in-memory to improve performance + ## + inMemory: + enabled: false + ## The maximum usage on memory medium EmptyDir would be + ## the minimum value between the SizeLimit specified + ## here and the sum of memory limits of all containers in a pod + ## + # sizeLimit: 300Mi + +initChownData: + ## If false, data ownership will not be reset at startup + ## This allows the prometheus-server to be run with an arbitrary user + ## + enabled: true + + ## initChownData container image + ## + image: + repository: rancher/mirrored-library-busybox + tag: "1.31.1" + sha: "" + pullPolicy: IfNotPresent + + ## initChownData resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + +# Administrator credentials when not using an existing secret 
(see below) +adminUser: admin +# adminPassword: strongpassword + +# Use an existing secret for the admin user. +admin: + existingSecret: "" + userKey: admin-user + passwordKey: admin-password + +## Define command to be executed at startup by grafana container +## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) +## Default is "run.sh" as defined in grafana's Dockerfile +# command: +# - "sh" +# - "/run.sh" + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Extra environment variables that will be pass onto deployment pods +## +## to provide grafana with access to CloudWatch on AWS EKS: +## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later) +## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the +## same oidc eks provider as noted before (same as the existing line) +## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name +## +## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana", +## +## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess +## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name) +## +## env: +## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here +## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token +## AWS_REGION: us-east-1 +## +## 5. uncomment the EKS section in extraSecretMounts: below +## 6. 
uncomment the annotation section in the serviceAccount: above +## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn + +env: {} + +## "valueFrom" environment variable references that will be added to deployment pods +## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core +## Renders in container spec as: +## env: +## ... +## - name: +## valueFrom: +## +envValueFrom: {} + +## The name of a secret in the same kubernetes namespace which contain values to be added to the environment +## This can be useful for auth tokens, etc. Value is templated. +envFromSecret: "" + +## Sensible environment variables that will be rendered as new secret object +## This can be useful for auth tokens, etc +envRenderSecret: {} + +## Additional grafana server secret mounts +# Defines additional mounts with secrets. Secrets must be manually created in the namespace. +extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # secretName: grafana-secret-files + # readOnly: true + # subPath: "" + # + # for AWS EKS (cloudwatch) use the following (see also instruction in env: above) + # - name: aws-iam-token + # mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount + # readOnly: true + # projected: + # defaultMode: 420 + # sources: + # - serviceAccountToken: + # audience: sts.amazonaws.com + # expirationSeconds: 86400 + # path: token + # + # for CSI e.g. Azure Key Vault use the following + # - name: secrets-store-inline + # mountPath: /run/secrets + # readOnly: true + # csi: + # driver: secrets-store.csi.k8s.io + # readOnly: true + # volumeAttributes: + # secretProviderClass: "akv-grafana-spc" + # nodePublishSecretRef: # Only required when using service principal mode + # name: grafana-akv-creds # Only required when using service principal mode + +## Additional grafana server volume mounts +# Defines additional volume mounts. 
+extraVolumeMounts: [] + # - name: extra-volume + # mountPath: /mnt/volume + # readOnly: true + # existingClaim: volume-claim + +## Pass the plugins you want installed as a list. +## +plugins: [] + # - digrich-bubblechart-panel + # - grafana-clock-panel + +## Configure grafana datasources +## ref: http://docs.grafana.org/administration/provisioning/#datasources +## +datasources: {} +# datasources.yaml: +# apiVersion: 1 +# datasources: +# - name: Prometheus +# type: prometheus +# url: http://prometheus-prometheus-server +# access: proxy +# isDefault: true +# - name: CloudWatch +# type: cloudwatch +# access: proxy +# uid: cloudwatch +# editable: false +# jsonData: +# authType: credentials +# defaultRegion: us-east-1 + +## Configure notifiers +## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels +## +notifiers: {} +# notifiers.yaml: +# notifiers: +# - name: email-notifier +# type: email +# uid: email1 +# # either: +# org_id: 1 +# # or +# org_name: Main Org. +# is_default: true +# settings: +# addresses: an_email_address@example.com +# delete_notifiers: + +## Configure grafana dashboard providers +## ref: http://docs.grafana.org/administration/provisioning/#dashboards +## +## `path` must be /var/lib/grafana/dashboards/ +## +dashboardProviders: {} +# dashboardproviders.yaml: +# apiVersion: 1 +# providers: +# - name: 'default' +# orgId: 1 +# folder: '' +# type: file +# disableDeletion: false +# editable: true +# options: +# path: /var/lib/grafana/dashboards/default + +## Configure grafana dashboard to import +## NOTE: To use dashboards you must also enable/configure dashboardProviders +## ref: https://grafana.com/dashboards +## +## dashboards per provider, use provider name as key. 
+## +dashboards: {} + # default: + # some-dashboard: + # json: | + # $RAW_JSON + # custom-dashboard: + # file: dashboards/custom-dashboard.json + # prometheus-stats: + # gnetId: 2 + # revision: 2 + # datasource: Prometheus + # local-dashboard: + # url: https://example.com/repository/test.json + # token: '' + # local-dashboard-base64: + # url: https://example.com/repository/test-b64.json + # token: '' + # b64content: true + +## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value. +## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. +## ConfigMap data example: +## +## data: +## example-dashboard.json: | +## RAW_JSON +## +dashboardsConfigMaps: {} +# default: "" + +## Grafana's primary configuration +## NOTE: values in map will be converted to ini format +## ref: http://docs.grafana.org/installation/configuration/ +## +grafana.ini: + paths: + data: /var/lib/grafana/data + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: true + log: + mode: console + grafana_net: + url: https://grafana.net +## grafana Authentication can be enabled with the following values on grafana.ini + # server: + # The full public facing url you use in browser, used for redirects and emails + # root_url: + # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana + # auth.github: + # enabled: false + # allow_sign_up: false + # scopes: user:email,read:org + # auth_url: https://github.com/login/oauth/authorize + # token_url: https://github.com/login/oauth/access_token + # api_url: https://api.github.com/user + # team_ids: + # allowed_organizations: + # client_id: + # client_secret: +## LDAP Authentication can be enabled with the following values on grafana.ini +## NOTE: Grafana will fail to start if the value for ldap.toml is invalid + # auth.ldap: + # enabled: true + # allow_sign_up: true + # 
config_file: /etc/grafana/ldap.toml + +## Grafana's LDAP configuration +## Templated by the template in _helpers.tpl +## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled +## ref: http://docs.grafana.org/installation/configuration/#auth-ldap +## ref: http://docs.grafana.org/installation/ldap/#configuration +ldap: + enabled: false + # `existingSecret` is a reference to an existing secret containing the ldap configuration + # for Grafana in a key `ldap-toml`. + existingSecret: "" + # `config` is the content of `ldap.toml` that will be stored in the created secret + config: "" + # config: |- + # verbose_logging = true + + # [[servers]] + # host = "my-ldap-server" + # port = 636 + # use_ssl = true + # start_tls = false + # ssl_skip_verify = false + # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" + +## Grafana's SMTP configuration +## NOTE: To enable, grafana.ini must be configured with smtp.enabled +## ref: http://docs.grafana.org/installation/configuration/#smtp +smtp: + # `existingSecret` is a reference to an existing secret containing the smtp configuration + # for Grafana. 
+ existingSecret: "" + userKey: "user" + passwordKey: "password" + +## Sidecars that collect the configmaps with specified label and stores the included files into the respective folders +## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards +sidecar: + image: + repository: rancher/mirrored-kiwigrid-k8s-sidecar + tag: 1.10.7 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} +# limits: +# cpu: 100m +# memory: 100Mi +# requests: +# cpu: 50m +# memory: 50Mi + # skipTlsVerify Set to true to skip tls verification for kube api calls + # skipTlsVerify: true + enableUniqueFilenames: false + dashboards: + enabled: false + SCProvider: true + # label that the configmaps with dashboards are marked with + label: grafana_dashboard + # value of label that the configmaps with dashboards are set to + labelValue: null + # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set) + folder: /tmp/dashboards + # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead + defaultFolderName: null + # If specified, the sidecar will search for dashboard config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # If specified, the sidecar will look for annotation with this name to create folder and put graph here. + # You can use this parameter together with `provider.foldersFromFilesStructure` to annotate configmaps and create folder structure. 
+ folderAnnotation: null + # provider configuration that lets grafana manage the dashboards + provider: + # name of the provider, should be unique + name: sidecarProvider + # orgid as configured in grafana + orgid: 1 + # folder in which the dashboards should be imported in grafana + folder: '' + # type of the provider + type: file + # disableDelete to activate a import-only behaviour + disableDelete: false + # allow updating provisioned dashboards from the UI + allowUiUpdates: false + # allow Grafana to replicate dashboard structure from filesystem + foldersFromFilesStructure: false + datasources: + enabled: false + # label that the configmaps with datasources are marked with + label: grafana_datasource + # value of label that the configmaps with datasources are set to + labelValue: null + # If specified, the sidecar will search for datasource config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + + ## The name of a secret in the same kubernetes namespace which contain values to be added to the environment + ## This can be useful for database passwords, etc. Value is templated. + envFromSecret: "" + notifiers: + enabled: false + # label that the configmaps with notifiers are marked with + label: grafana_notifier + # If specified, the sidecar will search for notifier config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. 
+ # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + +## Override the deployment namespace +## +namespaceOverride: "" + +## Number of old ReplicaSets to retain +## +revisionHistoryLimit: 10 + +## Add a seperate remote image renderer deployment/service +imageRenderer: + # Enable the image-renderer deployment & service + enabled: false + replicas: 1 + image: + # image-renderer Image repository + repository: rancher/mirrored-grafana-grafana-image-renderer + # image-renderer Image tag + tag: 2.0.1 + # image-renderer Image sha (optional) + sha: "" + # image-renderer ImagePullPolicy + pullPolicy: Always + # extra environment variables + env: + HTTP_HOST: "0.0.0.0" + # RENDERING_ARGS: --disable-gpu,--window-size=1280x758 + # RENDERING_MODE: clustered + # image-renderer deployment serviceAccount + serviceAccountName: "" + # image-renderer deployment securityContext + securityContext: {} + # image-renderer deployment Host Aliases + hostAliases: [] + # image-renderer deployment priority class + priorityClassName: '' + service: + # image-renderer service port name + portName: 'http' + # image-renderer service port used by both service and deployment + port: 8081 + targetPort: 8081 + # In case a sub_path is used this needs to be added to the image renderer callback + grafanaSubPath: "" + # name of the image-renderer port on the pod + podPortName: http + # number of image-renderer replica sets to keep + revisionHistoryLimit: 10 + networkPolicy: + # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods + limitIngress: true + # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods + limitEgress: false + resources: {} +# limits: +# cpu: 100m +# memory: 100Mi +# requests: +# cpu: 50m +# memory: 50Mi diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/Chart.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/Chart.yaml new file mode 100755 index 000000000..487e7401a --- /dev/null 
+++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/Chart.yaml @@ -0,0 +1,21 @@ +annotations: + catalog.cattle.io/auto-install: rancher-kiali-server-crd=1.32.100 + catalog.cattle.io/certified: rancher + catalog.cattle.io/display-name: Istio + catalog.cattle.io/namespace: istio-system + catalog.cattle.io/os: linux + catalog.cattle.io/provides-gvr: networking.istio.io.virtualservice/v1beta1 + catalog.cattle.io/release-name: rancher-istio + catalog.cattle.io/requests-cpu: 710m + catalog.cattle.io/requests-memory: 2314Mi + catalog.cattle.io/ui-component: istio +apiVersion: v1 +appVersion: 1.8.5 +description: A basic Istio setup that installs with the istioctl. Refer to https://istio.io/latest/ + for details. +icon: https://charts.rancher.io/assets/logos/istio.svg +keywords: +- networking +- infrastructure +name: rancher-istio +version: 1.8.500 diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/README.md b/charts/rancher-istio-1.8/rancher-istio/1.8.500/README.md new file mode 100755 index 000000000..199e45312 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/README.md @@ -0,0 +1,69 @@ +# Rancher Istio Installers + +A Rancher created chart that packages the istioctl binary to install via a helm chart. + +# Installation Requirements + +## Chart Dependencies +- rancher-kiali-server-crd chart + +# Uninstallation Requirements +To ensure rancher-istio uninstalls correctly, you must uninstall rancher-istio prior to uninstalling chart dependencies (see installation requirements for chart dependencies). This is because all definitions need to be available in order to properly build the rancher-istio objects for removal. 
+ +If you remove dependent CRD charts prior to removing rancher-istio, you may encounter the following error: +
+`Error: uninstallation completed with 1 error(s): unable to build kubernetes objects for delete: unable to recognize "": no matches for kind "MonitoringDashboard" in version "monitoring.kiali.io/v1alpha1"` + +# Addons + +## Kiali + +Kiali allows you to view and manage your istio-based service mesh through an easy to use dashboard. + +#### Dependencies +- rancher-monitoring chart or other Prometheus installation + +This dependency installs the required CRDs for installing Kiali. Since Kiali is bundled in with Istio in this chart, if you do not have these dependencies installed, your Istio installation will fail. If you do not plan on using Kiali, set `kiali.enabled=false` when installing Istio for a successful installation. + +> **Note:** The following configuration options assume you have installed the dependencies for Kiali. Please ensure you have Prometheus in your cluster before proceeding. + +The Monitoring app sets `prometheus.prometheusSpec.ignoreNamespaceSelectors=false` which means all namespaces will be scraped by Prometheus by default. This ensures you can view traffic, metrics and graphs for resources deployed in other namespaces. + +To limit scraping to specific namespaces, set `prometheus.prometheusSpec.ignoreNamespaceSelectors=true` and add one of the following configurations to ensure you can continue to view traffic, metrics and graphs for your deployed resources. + +1. Add a Service Monitor or Pod Monitor in the namespace with the targets you want to scrape. +1. Add an additionalScrapeConfig to your rancher-monitoring instance to scrape all targets in all namespaces. 
+ +#### External Services + +##### Prometheus +The `kiali.external_services.prometheus` url is set in the values.yaml: +``` +http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc:{{ prometheus.service.port }} +``` +The url depends on the default values for `nameOverride`, `namespaceOverride`, and `prometheus.service.port` being set in your rancher-monitoring or other monitoring instance. + +##### Grafana +The `kiali.external_services.grafana` url is set in the values.yaml: +``` +http://{{ .Values.nameOverride }}-grafana.{{ .Values.namespaceOverride }}.svc:{{ grafana.service.port }} +``` +The url depends on the default values for `nameOverride`, `namespaceOverride`, and `grafana.service.port` being set in your rancher-monitoring or other monitoring instance. + +##### Tracing +The `kiali.external_services.tracing` url and `.Values.tracing.contextPath` is set in the rancher-istio values.yaml: +``` +http://tracing.{{ .Values.namespaceOverride }}.svc:{{ .Values.service.externalPort }}/{{ .Values.tracing.contextPath }} +``` +The url depends on the default values for `namespaceOverride`, and `.Values.service.externalPort` being set in your rancher-tracing or other tracing instance. + +## Jaeger + +Jaeger allows you to trace and monitor distributed microservices. + +> **Note:** This addon is using the all-in-one Jaeger installation which is not qualified for production. Use the [Jaeger Tracing](https://www.jaegertracing.io/docs/1.21/getting-started/) documentation to determine which installation you will need for your production needs. + +# Installation +``` +helm install rancher-istio . 
--create-namespace -n istio-system +``` \ No newline at end of file diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/app-readme.md b/charts/rancher-istio-1.8/rancher-istio/1.8.500/app-readme.md new file mode 100755 index 000000000..0e42df083 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/app-readme.md @@ -0,0 +1,45 @@ +# Rancher Istio + +Our [Istio](https://istio.io/) installer wraps the istioctl binary commands in a handy helm chart, including an overlay file option to allow complex customization. It also includes: +* **[Kiali](https://kiali.io/)**: Used for graphing traffic flow throughout the mesh +* **[Jaeger](https://www.jaegertracing.io/)**: A quick start, all-in-one installation used for tracing distributed systems. This is not production qualified, please refer to jaeger documentation to determine which installation you may need instead. + +### Dependencies + +**Rancher Monitoring or other Prometheus installation** + +The Prometheus CRDs are required for installing Kiali which is enabled by default. If you do not have Prometheus installed your Istio installation will fail. If you do not plan on using Kiali, set `kiali.enabled=false` to bypass this requirement. + +### Customization + +**Rancher Monitoring** + +The Rancher Monitoring app sets `prometheus.prometheusSpec.ignoreNamespaceSelectors=false` which means all namespaces will be scraped by Prometheus by default. This ensures you can view traffic, metrics and graphs for resources deployed in other namespaces. + +To limit scraping to specific namespaces, set `prometheus.prometheusSpec.ignoreNamespaceSelectors=true` and add one of the following configurations to ensure you can continue to view traffic, metrics and graphs for your deployed resources. + +1. Add a Service Monitor or Pod Monitor in the namespace with the targets you want to scrape. +1. Add an additionalScrapeConfig to your rancher-monitoring instance to scrape all targets in all namespaces. 
+ +**Custom Prometheus Installation with Kiali** + +To use a custom Monitoring installation, set the `kiali.external_services.prometheus` url in the values.yaml. This url depends on the values for `nameOverride`, `namespaceOverride`, and `prometheus.service.port` in your rancher-monitoring or other monitoring instance: +``` +http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc:{{ prometheus.service.port }} +``` +**Custom Grafana Installation with Kiali** + +To use a custom Grafana installation, set the `kiali.external_services.grafana` url in the values.yaml. This url depends on the values for `nameOverride`, `namespaceOverride`, and `grafana.service.port` in your rancher-monitoring or other grafana instance: +``` +http://{{ .Values.nameOverride }}-grafana.{{ .Values.namespaceOverride }}.svc:{{ grafana.service.port }} +``` +**Custom Tracing Installation with Kiali** + +To use a custom Tracing installation, set the `kiali.external_services.tracing` url and update the `.Values.tracing.contextPath` in the rancher-istio values.yaml. + +This url depends on the values for `namespaceOverride`, and `.Values.service.externalPort` in your rancher-tracing or other tracing instance: +``` +http://tracing.{{ .Values.namespaceOverride }}.svc:{{ .Values.service.externalPort }}/{{ .Values.tracing.contextPath }} +``` + +For more information on how to use the feature, refer to our [docs](https://rancher.com/docs/rancher/v2.x/en/istio/v2.5/). 
diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/Chart.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/Chart.yaml new file mode 100755 index 000000000..9b6fdf385 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/Chart.yaml @@ -0,0 +1,31 @@ +annotations: + catalog.cattle.io/auto-install: rancher-kiali-server-crd=match + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.cattle.io/provides-gvr: monitoringdashboards.monitoring.kiali.io/v1alpha1 + catalog.cattle.io/requires-gvr: monitoring.coreos.com.prometheus/v1 + catalog.rancher.io/namespace: cattle-istio-system + catalog.rancher.io/release-name: rancher-kiali-server +apiVersion: v2 +appVersion: v1.32.0 +description: Kiali is an open source project for service mesh observability, refer + to https://www.kiali.io for details. This is installed as sub-chart with customized + values in Rancher's Istio. +home: https://github.com/kiali/kiali +icon: https://raw.githubusercontent.com/kiali/kiali.io/master/themes/kiali/static/img/kiali_logo_masthead.png +keywords: +- istio +- kiali +- networking +- infrastructure +maintainers: +- email: kiali-users@googlegroups.com + name: Kiali + url: https://kiali.io +name: kiali +sources: +- https://github.com/kiali/kiali +- https://github.com/kiali/kiali-ui +- https://github.com/kiali/kiali-operator +- https://github.com/kiali/helm-charts +version: 1.32.1 diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/NOTES.txt b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/NOTES.txt new file mode 100755 index 000000000..751019401 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/NOTES.txt @@ -0,0 +1,5 @@ +Welcome to Kiali! For more details on Kiali, see: https://kiali.io + +The Kiali Server [{{ .Chart.AppVersion }}] has been installed in namespace [{{ .Release.Namespace }}]. It will be ready soon. 
+ +(Helm: Chart=[{{ .Chart.Name }}], Release=[{{ .Release.Name }}], Version=[{{ .Chart.Version }}]) diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/_helpers.tpl b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/_helpers.tpl new file mode 100755 index 000000000..dd33bbe48 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/_helpers.tpl @@ -0,0 +1,192 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "kiali-server.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "kiali-server.fullname" -}} +{{- if .Values.fullnameOverride }} + {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} + {{- $name := default .Chart.Name .Values.nameOverride }} + {{- printf "%s" $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kiali-server.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Identifies the log_level with the old verbose_mode and the new log_level considered. +*/}} +{{- define "kiali-server.logLevel" -}} +{{- if .Values.deployment.verbose_mode -}} +{{- .Values.deployment.verbose_mode -}} +{{- else -}} +{{- .Values.deployment.logger.log_level -}} +{{- end -}} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "kiali-server.labels" -}} +helm.sh/chart: {{ include "kiali-server.chart" . }} +app: {{ include "kiali-server.name" . }} +{{ include "kiali-server.selectorLabels" . 
}} +version: {{ .Values.deployment.version_label | default .Chart.AppVersion | quote }} +app.kubernetes.io/version: {{ .Values.deployment.version_label | default .Chart.AppVersion | quote }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/part-of: "kiali" +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "kiali-server.selectorLabels" -}} +app.kubernetes.io/name: {{ include "kiali-server.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Used to determine if a custom dashboard (defined in .Template.Name) should be deployed. +*/}} +{{- define "kiali-server.isDashboardEnabled" -}} +{{- if .Values.external_services.custom_dashboards.enabled }} + {{- $includere := "" }} + {{- range $_, $s := .Values.deployment.custom_dashboards.includes }} + {{- if $s }} + {{- if $includere }} + {{- $includere = printf "%s|^%s$" $includere ($s | replace "*" ".*" | replace "?" ".") }} + {{- else }} + {{- $includere = printf "^%s$" ($s | replace "*" ".*" | replace "?" ".") }} + {{- end }} + {{- end }} + {{- end }} + {{- $excludere := "" }} + {{- range $_, $s := .Values.deployment.custom_dashboards.excludes }} + {{- if $s }} + {{- if $excludere }} + {{- $excludere = printf "%s|^%s$" $excludere ($s | replace "*" ".*" | replace "?" ".") }} + {{- else }} + {{- $excludere = printf "^%s$" ($s | replace "*" ".*" | replace "?" ".") }} + {{- end }} + {{- end }} + {{- end }} + {{- if (and (mustRegexMatch (default "no-matches" $includere) (base .Template.Name)) (not (mustRegexMatch (default "no-matches" $excludere) (base .Template.Name)))) }} + {{- print "enabled" }} + {{- else }} + {{- print "" }} + {{- end }} +{{- else }} + {{- print "" }} +{{- end }} +{{- end }} + +{{/* +Determine the default login token signing key. 
+*/}} +{{- define "kiali-server.login_token.signing_key" -}} +{{- if .Values.login_token.signing_key }} + {{- .Values.login_token.signing_key }} +{{- else }} + {{- randAlphaNum 16 }} +{{- end }} +{{- end }} + +{{/* +Determine the default web root. +*/}} +{{- define "kiali-server.server.web_root" -}} +{{- if .Values.server.web_root }} + {{- .Values.server.web_root | trimSuffix "/" }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- "/" }} + {{- else }} + {{- "/kiali" }} + {{- end }} +{{- end }} +{{- end }} + +{{/* +Determine the default identity cert file. There is no default if on k8s; only on OpenShift. +*/}} +{{- define "kiali-server.identity.cert_file" -}} +{{- if hasKey .Values.identity "cert_file" }} + {{- .Values.identity.cert_file }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- "/kiali-cert/tls.crt" }} + {{- else }} + {{- "" }} + {{- end }} +{{- end }} +{{- end }} + +{{/* +Determine the default identity private key file. There is no default if on k8s; only on OpenShift. +*/}} +{{- define "kiali-server.identity.private_key_file" -}} +{{- if hasKey .Values.identity "private_key_file" }} + {{- .Values.identity.private_key_file }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- "/kiali-cert/tls.key" }} + {{- else }} + {{- "" }} + {{- end }} +{{- end }} +{{- end }} + +{{/* +Determine the istio namespace - default is where Kiali is installed. +*/}} +{{- define "kiali-server.istio_namespace" -}} +{{- if .Values.istio_namespace }} + {{- .Values.istio_namespace }} +{{- else }} + {{- .Release.Namespace }} +{{- end }} +{{- end }} + +{{/* +Determine the auth strategy to use - default is "token" on Kubernetes and "openshift" on OpenShift. 
+*/}} +{{- define "kiali-server.auth.strategy" -}} +{{- if .Values.auth.strategy }} + {{- if (and (eq .Values.auth.strategy "openshift") (not .Values.kiali_route_url)) }} + {{- fail "You did not define what the Kiali Route URL will be (--set kiali_route_url=...). Without this set, the openshift auth strategy will not work. Either set that or use a different auth strategy via the --set auth.strategy=... option." }} + {{- end }} + {{- .Values.auth.strategy }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- if not .Values.kiali_route_url }} + {{- fail "You did not define what the Kiali Route URL will be (--set kiali_route_url=...). Without this set, the openshift auth strategy will not work. Either set that or explicitly indicate another auth strategy you want via the --set auth.strategy=... option." }} + {{- end }} + {{- "openshift" }} + {{- else }} + {{- "token" }} + {{- end }} +{{- end }} +{{- end }} + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/cabundle.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/cabundle.yaml new file mode 100755 index 000000000..7462b95a7 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/cabundle.yaml @@ -0,0 +1,13 @@ +{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kiali-server.fullname" . }}-cabundle + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + annotations: + service.beta.openshift.io/inject-cabundle: "true" +... 
+{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/configmap.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/configmap.yaml new file mode 100755 index 000000000..b1bf53173 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/configmap.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +data: + config.yaml: | + {{- /* Most of .Values is simply the ConfigMap - strip out the keys that are not part of the ConfigMap */}} + {{- $cm := omit .Values "nameOverride" "fullnameOverride" "kiali_route_url" }} + {{- /* The helm chart defines namespace for us, but pass it to the ConfigMap in case the server needs it */}} + {{- $_ := set $cm.deployment "namespace" .Release.Namespace }} + {{- /* Some values of the ConfigMap are generated, but might not be identical, from .Values */}} + {{- $_ := set $cm "istio_namespace" (include "kiali-server.istio_namespace" .) }} + {{- $_ := set $cm.auth "strategy" (include "kiali-server.auth.strategy" .) }} + {{- $_ := set $cm.auth.openshift "client_id_prefix" (include "kiali-server.fullname" .) }} + {{- $_ := set $cm.identity "cert_file" (include "kiali-server.identity.cert_file" .) }} + {{- $_ := set $cm.identity "private_key_file" (include "kiali-server.identity.private_key_file" .) }} + {{- $_ := set $cm.login_token "signing_key" (include "kiali-server.login_token.signing_key" .) }} + {{- $_ := set $cm.server "web_root" (include "kiali-server.server.web_root" .) }} + {{- toYaml $cm | nindent 4 }} +... 
diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/envoy.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/envoy.yaml new file mode 100755 index 000000000..85b402017 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/envoy.yaml @@ -0,0 +1,56 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: envoy + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + title: Envoy Metrics + discoverOn: "envoy_server_uptime" + items: + - chart: + name: "Pods uptime" + spans: 4 + metricName: "envoy_server_uptime" + dataType: "raw" + - chart: + name: "Allocated memory" + unit: "bytes" + spans: 4 + metricName: "envoy_server_memory_allocated" + dataType: "raw" + min: 0 + - chart: + name: "Heap size" + unit: "bytes" + spans: 4 + metricName: "envoy_server_memory_heap_size" + dataType: "raw" + min: 0 + - chart: + name: "Upstream active connections" + spans: 6 + metricName: "envoy_cluster_upstream_cx_active" + dataType: "raw" + - chart: + name: "Upstream total requests" + spans: 6 + metricName: "envoy_cluster_upstream_rq_total" + unit: "rps" + dataType: "rate" + - chart: + name: "Downstream active connections" + spans: 6 + metricName: "envoy_listener_downstream_cx_active" + dataType: "raw" + - chart: + name: "Downstream HTTP requests" + spans: 6 + metricName: "envoy_listener_http_downstream_rq" + unit: "rps" + dataType: "rate" +... 
+{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/go.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/go.yaml new file mode 100755 index 000000000..2d2f42a93 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/go.yaml @@ -0,0 +1,67 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: go + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + title: Go Metrics + runtime: Go + discoverOn: "go_info" + items: + - chart: + name: "CPU ratio" + spans: 6 + metricName: "process_cpu_seconds_total" + dataType: "rate" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "RSS Memory" + unit: "bytes" + spans: 6 + metricName: "process_resident_memory_bytes" + dataType: "raw" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "Goroutines" + spans: 6 + metricName: "go_goroutines" + dataType: "raw" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "Heap allocation rate" + unit: "bytes/s" + spans: 6 + metricName: "go_memstats_alloc_bytes_total" + dataType: "rate" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "GC rate" + spans: 6 + metricName: "go_gc_duration_seconds_count" + dataType: "rate" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "Next GC" + unit: "bytes" + spans: 6 + metricName: "go_memstats_next_gc_bytes" + dataType: "raw" + aggregations: + - label: "pod_name" + displayName: "Pod" +... 
+{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/kiali.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/kiali.yaml new file mode 100755 index 000000000..b1f011b4f --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/kiali.yaml @@ -0,0 +1,44 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: kiali + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + title: Kiali Internal Metrics + items: + - chart: + name: "API processing duration" + unit: "seconds" + spans: 6 + metricName: "kiali_api_processing_duration_seconds" + dataType: "histogram" + aggregations: + - label: "route" + displayName: "Route" + - chart: + name: "Functions processing duration" + unit: "seconds" + spans: 6 + metricName: "kiali_go_function_processing_duration_seconds" + dataType: "histogram" + aggregations: + - label: "function" + displayName: "Function" + - label: "package" + displayName: "Package" + - chart: + name: "Failures" + spans: 12 + metricName: "kiali_go_function_failures_total" + dataType: "raw" + aggregations: + - label: "function" + displayName: "Function" + - label: "package" + displayName: "Package" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml new file mode 100755 index 000000000..2e1ed5cff --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml @@ -0,0 +1,43 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) 
}} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: micrometer-1.0.6-jvm-pool + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: JVM + title: JVM Pool Metrics + discoverOn: "jvm_buffer_total_capacity_bytes" + items: + - chart: + name: "Pool buffer memory used" + unit: "bytes" + spans: 4 + metricName: "jvm_buffer_memory_used_bytes" + dataType: "raw" + aggregations: + - label: "id" + displayName: "Pool" + - chart: + name: "Pool buffer capacity" + unit: "bytes" + spans: 4 + metricName: "jvm_buffer_total_capacity_bytes" + dataType: "raw" + aggregations: + - label: "id" + displayName: "Pool" + - chart: + name: "Pool buffer count" + unit: "bytes" + spans: 4 + metricName: "jvm_buffer_count" + dataType: "raw" + aggregations: + - label: "id" + displayName: "Pool" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm.yaml new file mode 100755 index 000000000..d64596882 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm.yaml @@ -0,0 +1,65 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: micrometer-1.0.6-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: JVM + title: JVM Metrics + discoverOn: "jvm_threads_live" + items: + - chart: + name: "Total live threads" + spans: 4 + metricName: "jvm_threads_live" + dataType: "raw" + - chart: + name: "Daemon threads" + spans: 4 + metricName: "jvm_threads_daemon" + dataType: "raw" + - chart: + name: "Loaded classes" + spans: 4 + metricName: "jvm_classes_loaded" + dataType: "raw" + + - chart: + name: "Memory used" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_used_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory commited" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_committed_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory max" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_max_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/micrometer-1.1-jvm.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/micrometer-1.1-jvm.yaml new file mode 100755 index 000000000..76e8d0a4a --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/micrometer-1.1-jvm.yaml @@ -0,0 +1,68 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: micrometer-1.1-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: JVM + title: JVM Metrics + discoverOn: "jvm_threads_live_threads" + items: + - chart: + name: "Memory used" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_used_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory commited" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_committed_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory max" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_max_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + + - chart: + name: "Total live threads" + spans: 4 + metricName: "jvm_threads_live_threads" + dataType: "raw" + - chart: + name: "Daemon threads" + spans: 4 + metricName: "jvm_threads_daemon_threads" + dataType: "raw" + - chart: + name: "Threads states" + spans: 4 + metricName: "jvm_threads_states_threads" + dataType: "raw" + aggregations: + - label: "state" + displayName: "State" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/microprofile-1.1.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/microprofile-1.1.yaml new file mode 100755 index 000000000..1d4951196 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/microprofile-1.1.yaml @@ -0,0 +1,59 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: microprofile-1.1 + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: MicroProfile Metrics + runtime: MicroProfile + discoverOn: "base:thread_count" + items: + - chart: + name: "Current loaded classes" + spans: 6 + metricName: "base:classloader_current_loaded_class_count" + dataType: "raw" + - chart: + name: "Unloaded classes" + spans: 6 + metricName: "base:classloader_total_unloaded_class_count" + dataType: "raw" + - chart: + name: "Thread count" + spans: 4 + metricName: "base:thread_count" + dataType: "raw" + - chart: + name: "Thread max count" + spans: 4 + metricName: "base:thread_max_count" + dataType: "raw" + - chart: + name: "Thread daemon count" + spans: 4 + metricName: "base:thread_daemon_count" + dataType: "raw" + - chart: + name: "Committed heap" + unit: "bytes" + spans: 4 + metricName: "base:memory_committed_heap_bytes" + dataType: "raw" + - chart: + name: "Max heap" + unit: "bytes" + spans: 4 + metricName: "base:memory_max_heap_bytes" + dataType: "raw" + - chart: + name: "Used heap" + unit: "bytes" + spans: 4 + metricName: "base:memory_used_heap_bytes" + dataType: "raw" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/microprofile-x.y.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/microprofile-x.y.yaml new file mode 100755 index 000000000..57ddc60ef --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/microprofile-x.y.yaml @@ -0,0 +1,38 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: microprofile-x.y + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: MicroProfile Metrics + runtime: MicroProfile + discoverOn: "base:gc_complete_scavenger_count" + items: + - chart: + name: "Young GC time" + unit: "seconds" + spans: 3 + metricName: "base:gc_young_generation_scavenger_time_seconds" + dataType: "raw" + - chart: + name: "Young GC count" + spans: 3 + metricName: "base:gc_young_generation_scavenger_count" + dataType: "raw" + - chart: + name: "Total GC time" + unit: "seconds" + spans: 3 + metricName: "base:gc_complete_scavenger_time_seconds" + dataType: "raw" + - chart: + name: "Total GC count" + spans: 3 + metricName: "base:gc_complete_scavenger_count" + dataType: "raw" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/nodejs.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/nodejs.yaml new file mode 100755 index 000000000..1ffe0aa10 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/nodejs.yaml @@ -0,0 +1,59 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: nodejs + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Node.js + title: Node.js Metrics + discoverOn: "nodejs_active_handles_total" + items: + - chart: + name: "Active handles" + spans: 4 + metricName: "nodejs_active_handles_total" + dataType: "raw" + - chart: + name: "Active requests" + spans: 4 + metricName: "nodejs_active_requests_total" + dataType: "raw" + - chart: + name: "Event loop lag" + unit: "seconds" + spans: 4 + metricName: "nodejs_eventloop_lag_seconds" + dataType: "raw" + - chart: + name: "Total heap size" + unit: "bytes" + spans: 12 + metricName: "nodejs_heap_space_size_total_bytes" + dataType: "raw" + aggregations: + - label: "space" + displayName: "Space" + - chart: + name: "Used heap size" + unit: "bytes" + spans: 6 + metricName: "nodejs_heap_space_size_used_bytes" + dataType: "raw" + aggregations: + - label: "space" + displayName: "Space" + - chart: + name: "Available heap size" + unit: "bytes" + spans: 6 + metricName: "nodejs_heap_space_size_available_bytes" + dataType: "raw" + aggregations: + - label: "space" + displayName: "Space" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/quarkus.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/quarkus.yaml new file mode 100755 index 000000000..cef5f3dce --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/quarkus.yaml @@ -0,0 +1,33 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: quarkus + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: Quarkus Metrics + runtime: Quarkus + items: + - chart: + name: "Thread count" + spans: 4 + metricName: "vendor:thread_count" + dataType: "raw" + - chart: + name: "Used heap" + unit: "bytes" + spans: 4 + metricName: "vendor:memory_heap_usage_bytes" + dataType: "raw" + - chart: + name: "Used non-heap" + unit: "bytes" + spans: 4 + metricName: "vendor:memory_non_heap_usage_bytes" + dataType: "raw" + - include: "microprofile-x.y" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/springboot-jvm-pool.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/springboot-jvm-pool.yaml new file mode 100755 index 000000000..42d87d890 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/springboot-jvm-pool.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: springboot-jvm-pool + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Spring Boot + title: JVM Pool Metrics + items: + - include: "micrometer-1.0.6-jvm-pool" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/springboot-jvm.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/springboot-jvm.yaml new file mode 100755 index 000000000..ced3acdd9 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/springboot-jvm.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: springboot-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Spring Boot + title: JVM Metrics + items: + - include: "micrometer-1.0.6-jvm" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/springboot-tomcat.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/springboot-tomcat.yaml new file mode 100755 index 000000000..c07016aa2 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/springboot-tomcat.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: springboot-tomcat + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Spring Boot + title: Tomcat Metrics + items: + - include: "tomcat" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/thorntail.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/thorntail.yaml new file mode 100755 index 000000000..6bd85e6f5 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/thorntail.yaml @@ -0,0 +1,22 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: thorntail + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Thorntail + title: Thorntail Metrics + discoverOn: "vendor:loaded_modules" + items: + - include: "microprofile-1.1" + - chart: + name: "Loaded modules" + spans: 6 + metricName: "vendor:loaded_modules" + dataType: "raw" +... 
+{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/tomcat.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/tomcat.yaml new file mode 100755 index 000000000..9a803342f --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/tomcat.yaml @@ -0,0 +1,67 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: tomcat + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Tomcat + title: Tomcat Metrics + discoverOn: "tomcat_sessions_created_total" + items: + - chart: + name: "Sessions created" + spans: 4 + metricName: "tomcat_sessions_created_total" + dataType: "raw" + - chart: + name: "Active sessions" + spans: 4 + metricName: "tomcat_sessions_active_current" + dataType: "raw" + - chart: + name: "Sessions rejected" + spans: 4 + metricName: "tomcat_sessions_rejected_total" + dataType: "raw" + + - chart: + name: "Bytes sent" + unit: "bitrate" + spans: 6 + metricName: "tomcat_global_sent_bytes_total" + dataType: "rate" + aggregations: + - label: "name" + displayName: "Name" + - chart: + name: "Bytes received" + unit: "bitrate" + spans: 6 + metricName: "tomcat_global_received_bytes_total" + dataType: "rate" + aggregations: + - label: "name" + displayName: "Name" + + - chart: + name: "Global errors" + spans: 6 + metricName: "tomcat_global_error_total" + dataType: "raw" + aggregations: + - label: "name" + displayName: "Name" + - chart: + name: "Servlet errors" + spans: 6 + metricName: "tomcat_servlet_error_total" + dataType: "raw" + aggregations: + - label: "name" + displayName: "Name" +... 
+{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-client.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-client.yaml new file mode 100755 index 000000000..2d591d6b0 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-client.yaml @@ -0,0 +1,60 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-client + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Client Metrics + discoverOn: "vertx_http_client_connections" + items: + - chart: + name: "Client response time" + unit: "seconds" + spans: 6 + metricName: "vertx_http_client_responseTime_seconds" + dataType: "histogram" + aggregations: + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Client request count rate" + unit: "ops" + spans: 6 + metricName: "vertx_http_client_requestCount_total" + dataType: "rate" + aggregations: + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Client active connections" + spans: 6 + metricName: "vertx_http_client_connections" + dataType: "raw" + - chart: + name: "Client active websockets" + spans: 6 + metricName: "vertx_http_client_wsConnections" + dataType: "raw" + - chart: + name: "Client bytes sent" + unit: "bytes" + spans: 6 + metricName: "vertx_http_client_bytesSent" + dataType: "histogram" + - chart: + name: "Client bytes received" + unit: "bytes" + spans: 6 + metricName: "vertx_http_client_bytesReceived" + dataType: "histogram" +... 
+{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-eventbus.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-eventbus.yaml new file mode 100755 index 000000000..65f9ee2ec --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-eventbus.yaml @@ -0,0 +1,59 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-eventbus + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Eventbus Metrics + discoverOn: "vertx_eventbus_handlers" + items: + - chart: + name: "Event bus handlers" + spans: 6 + metricName: "vertx_eventbus_handlers" + dataType: "raw" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus pending messages" + spans: 6 + metricName: "vertx_eventbus_pending" + dataType: "raw" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus processing time" + unit: "seconds" + spans: 6 + metricName: "vertx_eventbus_processingTime_seconds" + dataType: "histogram" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus bytes read" + unit: "bytes" + spans: 6 + metricName: "vertx_eventbus_bytesRead" + dataType: "histogram" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus bytes written" + unit: "bytes" + spans: 6 + metricName: "vertx_eventbus_bytesWritten" + dataType: "histogram" + aggregations: + - label: "address" + displayName: "Eventbus address" +... 
+{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-jvm.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-jvm.yaml new file mode 100755 index 000000000..2663186f3 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-jvm.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Vert.x + title: JVM Metrics + items: + - include: "micrometer-1.1-jvm" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-pool.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-pool.yaml new file mode 100755 index 000000000..f6af921b3 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-pool.yaml @@ -0,0 +1,68 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-pool + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Pools Metrics + discoverOn: "vertx_pool_ratio" + items: + - chart: + name: "Usage duration" + unit: "seconds" + spans: 6 + metricName: "vertx_pool_usage_seconds" + dataType: "histogram" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Usage ratio" + spans: 6 + metricName: "vertx_pool_ratio" + dataType: "raw" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Queue size" + spans: 6 + metricName: "vertx_pool_queue_size" + dataType: "raw" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Time in queue" + unit: "seconds" + spans: 6 + metricName: "vertx_pool_queue_delay_seconds" + dataType: "histogram" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Resources used" + spans: 6 + metricName: "vertx_pool_inUse" + dataType: "raw" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-server.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-server.yaml new file mode 100755 index 000000000..de6b89df9 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/dashboards/vertx-server.yaml @@ -0,0 +1,62 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-server + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Server Metrics + discoverOn: "vertx_http_server_connections" + items: + - chart: + name: "Server response time" + unit: "seconds" + spans: 6 + metricName: "vertx_http_server_responseTime_seconds" + dataType: "histogram" + aggregations: + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Server request count rate" + unit: "ops" + spans: 6 + metricName: "vertx_http_server_requestCount_total" + dataType: "rate" + aggregations: + - label: "code" + displayName: "Error code" + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Server active connections" + spans: 6 + metricName: "vertx_http_server_connections" + dataType: "raw" + - chart: + name: "Server active websockets" + spans: 6 + metricName: "vertx_http_server_wsConnections" + dataType: "raw" + - chart: + name: "Server bytes sent" + unit: "bytes" + spans: 6 + metricName: "vertx_http_server_bytesSent" + dataType: "histogram" + - chart: + name: "Server bytes received" + unit: "bytes" + spans: 6 + metricName: "vertx_http_server_bytesReceived" + dataType: "histogram" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/deployment.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/deployment.yaml new file mode 100755 index 000000000..100c57922 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/deployment.yaml @@ -0,0 +1,174 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.deployment.replicas }} + selector: + matchLabels: + {{- include "kiali-server.selectorLabels" . 
| nindent 6 }} + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + name: {{ include "kiali-server.fullname" . }} + labels: + {{- include "kiali-server.labels" . | nindent 8 }} + {{- if .Values.deployment.pod_labels }} + {{- toYaml .Values.deployment.pod_labels | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.server.metrics_enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ .Values.server.metrics_port | quote }} + {{- else }} + prometheus.io/scrape: "false" + prometheus.io/port: "" + {{- end }} + kiali.io/runtimes: go,kiali + {{- if .Values.deployment.pod_annotations }} + {{- toYaml .Values.deployment.pod_annotations | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "kiali-server.fullname" . }} + {{- if .Values.deployment.priority_class_name }} + priorityClassName: {{ .Values.deployment.priority_class_name | quote }} + {{- end }} + {{- if .Values.deployment.image_pull_secrets }} + imagePullSecrets: + {{- range .Values.deployment.image_pull_secrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - image: "{{ template "system_default_registry" . }}{{ .Values.deployment.repository }}:{{ .Values.deployment.tag }}" + imagePullPolicy: {{ .Values.deployment.image_pull_policy | default "Always" }} + name: {{ include "kiali-server.fullname" . }} + command: + - "/opt/kiali/kiali" + - "-config" + - "/kiali-configuration/config.yaml" + ports: + - name: api-port + containerPort: {{ .Values.server.port | default 20001 }} + {{- if .Values.server.metrics_enabled }} + - name: http-metrics + containerPort: {{ .Values.server.metrics_port | default 9090 }} + {{- end }} + readinessProbe: + httpGet: + path: {{ include "kiali-server.server.web_root" . | trimSuffix "/" }}/healthz + port: api-port + {{- if (include "kiali-server.identity.cert_file" .) 
}} + scheme: HTTPS + {{- else }} + scheme: HTTP + {{- end }} + initialDelaySeconds: 5 + periodSeconds: 30 + livenessProbe: + httpGet: + path: {{ include "kiali-server.server.web_root" . | trimSuffix "/" }}/healthz + port: api-port + {{- if (include "kiali-server.identity.cert_file" .) }} + scheme: HTTPS + {{- else }} + scheme: HTTP + {{- end }} + initialDelaySeconds: 5 + periodSeconds: 30 + env: + - name: ACTIVE_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "{{ include "kiali-server.logLevel" . }}" + - name: LOG_FORMAT + value: "{{ .Values.deployment.logger.log_format }}" + - name: LOG_TIME_FIELD_FORMAT + value: "{{ .Values.deployment.logger.time_field_format }}" + - name: LOG_SAMPLER_RATE + value: "{{ .Values.deployment.logger.sampler_rate }}" + volumeMounts: + {{- if .Values.web_root_override }} + - name: kiali-console + subPath: env.js + mountPath: /opt/kiali/console/env.js + {{- end }} + - name: {{ include "kiali-server.fullname" . }}-configuration + mountPath: "/kiali-configuration" + - name: {{ include "kiali-server.fullname" . }}-cert + mountPath: "/kiali-cert" + - name: {{ include "kiali-server.fullname" . }}-secret + mountPath: "/kiali-secret" + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + - name: {{ include "kiali-server.fullname" . }}-cabundle + mountPath: "/kiali-cabundle" + {{- end }} + {{- if .Values.deployment.resources }} + resources: + {{- toYaml .Values.deployment.resources | nindent 10 }} + {{- end }} + volumes: + {{- if .Values.web_root_override }} + - name: kiali-console + configMap: + name: kiali-console + items: + - key: env.js + path: env.js + {{- end }} + - name: {{ include "kiali-server.fullname" . }}-configuration + configMap: + name: {{ include "kiali-server.fullname" . }} + - name: {{ include "kiali-server.fullname" . }}-cert + secret: + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + secretName: {{ include "kiali-server.fullname" . 
}}-cert-secret + {{- else }} + secretName: istio.{{ include "kiali-server.fullname" . }}-service-account + {{- end }} + {{- if not (include "kiali-server.identity.cert_file" .) }} + optional: true + {{- end }} + - name: {{ include "kiali-server.fullname" . }}-secret + secret: + secretName: {{ .Values.deployment.secret_name }} + optional: true + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + - name: {{ include "kiali-server.fullname" . }}-cabundle + configMap: + name: {{ include "kiali-server.fullname" . }}-cabundle + {{- end }} + {{- if or (.Values.deployment.affinity.node) (or (.Values.deployment.pod) (.Values.deployment.pod_anti)) }} + affinity: + {{- if .Values.deployment.affinity.node }} + nodeAffinity: + {{- toYaml .Values.deployment.affinity.node | nindent 10 }} + {{- end }} + {{- if .Values.deployment.affinity.pod }} + podAffinity: + {{- toYaml .Values.deployment.affinity.pod | nindent 10 }} + {{- end }} + {{- if .Values.deployment.affinity.pod_anti }} + podAntiAffinity: + {{- toYaml .Values.deployment.affinity.pod_anti | nindent 10 }} + {{- end }} + {{- end }} + {{- if .Values.deployment.tolerations }} + tolerations: + {{- toYaml .Values.deployment.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.deployment.node_selector }} + nodeSelector: + {{- toYaml .Values.deployment.node_selector | nindent 8 }} + {{- end }} +... diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/hpa.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/hpa.yaml new file mode 100755 index 000000000..934c4c1e9 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/hpa.yaml @@ -0,0 +1,17 @@ +{{- if .Values.deployment.hpa.spec }} +--- +apiVersion: {{ .Values.deployment.hpa.api_version }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "kiali-server.fullname" . }} + {{- toYaml .Values.deployment.hpa.spec | nindent 2 }} +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/ingress.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/ingress.yaml new file mode 100755 index 000000000..e4c98db1b --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/ingress.yaml @@ -0,0 +1,40 @@ +{{- if not (.Capabilities.APIVersions.Has "route.openshift.io/v1") }} +{{- if .Values.deployment.ingress_enabled }} +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + annotations: + {{- if hasKey .Values.deployment.override_ingress_yaml.metadata "annotations" }} + {{- toYaml .Values.deployment.override_ingress_yaml.metadata.annotations | nindent 4 }} + {{- else }} + # For ingress-nginx versions older than 0.20.0 use secure-backends. + # (see: https://github.com/kubernetes/ingress-nginx/issues/3416#issuecomment-438247948) + # For ingress-nginx versions 0.20.0 and later use backend-protocol. + {{- if (include "kiali-server.identity.cert_file" .) }} + nginx.ingress.kubernetes.io/secure-backends: "true" + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + {{- else }} + nginx.ingress.kubernetes.io/secure-backends: "false" + nginx.ingress.kubernetes.io/backend-protocol: "HTTP" + {{- end }} + {{- end }} +spec: + {{- if hasKey .Values.deployment.override_ingress_yaml "spec" }} + {{- toYaml .Values.deployment.override_ingress_yaml.spec | nindent 2 }} + {{- else }} + rules: + - http: + paths: + - path: {{ include "kiali-server.server.web_root" . }} + backend: + serviceName: {{ include "kiali-server.fullname" . 
}} + servicePort: {{ .Values.server.port }} + {{- end }} +... +{{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/oauth.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/oauth.yaml new file mode 100755 index 000000000..a178bb85e --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/oauth.yaml @@ -0,0 +1,17 @@ +{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} +{{- if .Values.kiali_route_url }} +--- +apiVersion: oauth.openshift.io/v1 +kind: OAuthClient +metadata: + name: {{ include "kiali-server.fullname" . }}-{{ .Release.Namespace }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +redirectURIs: +- {{ .Values.kiali_route_url }} +grantMethod: auto +allowAnyScope: true +... +{{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/psp.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/psp.yaml new file mode 100755 index 000000000..f891892cc --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/psp.yaml @@ -0,0 +1,67 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "kiali-server.fullname" . }}-psp + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "kiali-server.fullname" . }}-psp +subjects: + - kind: ServiceAccount + name: kiali +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "kiali-server.fullname" . }}-psp + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: + - policy + resourceNames: + - {{ include "kiali-server.fullname" . 
}}-psp + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "kiali-server.fullname" . }}-psp + namespace: {{ .Release.Namespace }} +spec: + allowPrivilegeEscalation: false + forbiddenSysctls: + - '*' + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + runAsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI + - persistentVolumeClaim +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/role-controlplane.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/role-controlplane.yaml new file mode 100755 index 000000000..a22c76756 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/role-controlplane.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "kiali-server.fullname" . }}-controlplane + namespace: {{ include "kiali-server.istio_namespace" . }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +rules: +- apiGroups: [""] + resources: + - secrets + verbs: + - list +... diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/role-viewer.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/role-viewer.yaml new file mode 100755 index 000000000..9fdd9fd1d --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/role-viewer.yaml @@ -0,0 +1,97 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "kiali-server.fullname" . }}-viewer + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +rules: +- apiGroups: [""] + resources: + - configmaps + - endpoints + - namespaces + - nodes + - pods + - pods/log + - pods/proxy + - replicationcontrollers + - services + verbs: + - get + - list + - watch +- apiGroups: [""] + resources: + - pods/portforward + verbs: + - create + - post +- apiGroups: ["extensions", "apps"] + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - watch +- apiGroups: ["batch"] + resources: + - cronjobs + - jobs + verbs: + - get + - list + - watch +- apiGroups: + - networking.istio.io + - security.istio.io + resources: ["*"] + verbs: + - get + - list + - watch +- apiGroups: ["apps.openshift.io"] + resources: + - deploymentconfigs + verbs: + - get + - list + - watch +- apiGroups: ["project.openshift.io"] + resources: + - projects + verbs: + - get +- apiGroups: ["route.openshift.io"] + resources: + - routes + verbs: + - get +- apiGroups: ["monitoring.kiali.io"] + resources: + - monitoringdashboards + verbs: + - get + - list +- apiGroups: ["iter8.tools"] + resources: + - experiments + verbs: + - get + - list +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: + - create +... diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/role.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/role.yaml new file mode 100755 index 000000000..8444bc753 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/role.yaml @@ -0,0 +1,108 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "kiali-server.fullname" . }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +rules: +- apiGroups: [""] + resources: + - configmaps + - endpoints + - namespaces + - nodes + - pods + - pods/log + - pods/proxy + - replicationcontrollers + - services + verbs: + - get + - list + - patch + - watch +- apiGroups: [""] + resources: + - pods/portforward + verbs: + - create + - post +- apiGroups: ["extensions", "apps"] + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - patch + - watch +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - watch +- apiGroups: ["batch"] + resources: + - cronjobs + - jobs + verbs: + - get + - list + - patch + - watch +- apiGroups: + - networking.istio.io + - security.istio.io + resources: ["*"] + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: ["apps.openshift.io"] + resources: + - deploymentconfigs + verbs: + - get + - list + - patch + - watch +- apiGroups: ["project.openshift.io"] + resources: + - projects + verbs: + - get +- apiGroups: ["route.openshift.io"] + resources: + - routes + verbs: + - get +- apiGroups: ["monitoring.kiali.io"] + resources: + - monitoringdashboards + verbs: + - get + - list +- apiGroups: ["iter8.tools"] + resources: + - experiments + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: + - create +... diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/rolebinding-controlplane.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/rolebinding-controlplane.yaml new file mode 100755 index 000000000..5a0015836 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/rolebinding-controlplane.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "kiali-server.fullname" . 
}}-controlplane + namespace: {{ include "kiali-server.istio_namespace" . }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "kiali-server.fullname" . }}-controlplane +subjects: +- kind: ServiceAccount + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} +... diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/rolebinding.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/rolebinding.yaml new file mode 100755 index 000000000..1eaabd65f --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/rolebinding.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "kiali-server.fullname" . }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + {{- if .Values.deployment.view_only_mode }} + name: {{ include "kiali-server.fullname" . }}-viewer + {{- else }} + name: {{ include "kiali-server.fullname" . }} + {{- end }} +subjects: +- kind: ServiceAccount + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} +... diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/route.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/route.yaml new file mode 100755 index 000000000..27940dc96 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/route.yaml @@ -0,0 +1,30 @@ +{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} +{{- if .Values.deployment.ingress_enabled }} +# As of OpenShift 4.5, need to use --disable-openapi-validation when installing via Helm +--- +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: {{ include "kiali-server.fullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + {{- if hasKey .Values.deployment.override_ingress_yaml.metadata "annotations" }}} + annotations: + {{- toYaml .Values.deployment.override_ingress_yaml.metadata.annotations | nindent 4 }} + {{- end }} +spec: + {{- if hasKey .Values.deployment.override_ingress_yaml "spec" }} + {{- toYaml .Values.deployment.override_ingress_yaml.spec | nindent 2 }} + {{- else }} + tls: + termination: reencrypt + insecureEdgeTerminationPolicy: Redirect + to: + kind: Service + targetPort: {{ .Values.server.port }} + name: {{ include "kiali-server.fullname" . }} + {{- end }} +... +{{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/service.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/service.yaml new file mode 100755 index 000000000..9ccf4f388 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/service.yaml @@ -0,0 +1,47 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + annotations: + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + service.beta.openshift.io/serving-cert-secret-name: {{ include "kiali-server.fullname" . 
}}-cert-secret + {{- end }} + kiali.io/api-spec: https://kiali.io/api + kiali.io/api-type: rest + {{- if and (not (empty .Values.server.web_fqdn)) (not (empty .Values.server.web_schema)) }} + {{- if empty .Values.server.web_port }} + kiali.io/external-url: {{ .Values.server.web_schema }}://{{ .Values.server.web_fqdn }}{{ default "" .Values.server.web_root }} + {{- else }} + kiali.io/external-url: {{ .Values.server.web_schema }}://{{ .Values.server.web_fqdn }}:{{ .Values.server.web_port }}{{(default "" .Values.server.web_root) }} + {{- end }} + {{- end }} + {{- if .Values.deployment.service_annotations }} + {{- toYaml .Values.deployment.service_annotations | nindent 4 }} + {{- end }} +spec: + {{- if .Values.deployment.service_type }} + type: {{ .Values.deployment.service_type }} + {{- end }} + ports: + {{- if (include "kiali-server.identity.cert_file" .) }} + - name: tcp + {{- else }} + - name: http + {{- end }} + protocol: TCP + port: {{ .Values.server.port }} + {{- if .Values.server.metrics_enabled }} + - name: http-metrics + protocol: TCP + port: {{ .Values.server.metrics_port }} + {{- end }} + selector: + {{- include "kiali-server.selectorLabels" . | nindent 4 }} + {{- if .Values.deployment.additional_service_yaml }} + {{- toYaml .Values.deployment.additional_service_yaml | nindent 2 }} + {{- end }} +... diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/serviceaccount.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/serviceaccount.yaml new file mode 100755 index 000000000..9151b6f6a --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/serviceaccount.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +... 
diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/validate-install-crd.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/validate-install-crd.yaml new file mode 100755 index 000000000..b42eeb266 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/validate-install-crd.yaml @@ -0,0 +1,14 @@ +#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}} +# {{- $found := dict -}} +# {{- set $found "monitoring.kiali.io/v1alpha1/MonitoringDashboard" false -}} +# {{- range .Capabilities.APIVersions -}} +# {{- if hasKey $found (toString .) -}} +# {{- set $found (toString .) true -}} +# {{- end -}} +# {{- end -}} +# {{- range $_, $exists := $found -}} +# {{- if (eq $exists false) -}} +# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}} +# {{- end -}} +# {{- end -}} +#{{- end -}} \ No newline at end of file diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/web-root-configmap.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/web-root-configmap.yaml new file mode 100755 index 000000000..970d4e4f5 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/templates/web-root-configmap.yaml @@ -0,0 +1,12 @@ +{{- if .Values.web_root_override }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: kiali-console + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +data: + env.js: | + window.WEB_ROOT='/k8s/clusters/{{ .Values.global.cattle.clusterId }}/api/v1/namespaces/{{ .Release.Namespace }}/services/http:kiali:20001/proxy/kiali'; +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/values.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/values.yaml new file mode 100755 index 000000000..aada4e09a --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/kiali/values.yaml @@ -0,0 +1,93 @@ +nameOverride: "kiali" +fullnameOverride: "kiali" + +# This is required for "openshift" auth strategy. +# You have to know ahead of time what your Route URL will be because +# right now the helm chart can't figure this out at runtime (it would +# need to wait for the Kiali Route to be deployed and for OpenShift +# to start it up). If someone knows how to update this helm chart to +# do this, a PR would be welcome. +kiali_route_url: "" + +# rancher specific override that allows proxy access to kiali url +web_root_override: true + +# +# Settings that mimic the Kiali CR which are placed in the ConfigMap. +# Note that only those values used by the Helm Chart will be here. +# + +istio_namespace: "" # default is where Kiali is installed + +auth: + openid: {} + openshift: {} + strategy: "" + +deployment: + # This only limits what Kiali will attempt to see, but Kiali Service Account has permissions to see everything. 
+ # For more control over what the Kial Service Account can see, use the Kiali Operator + accessible_namespaces: + - "**" + additional_service_yaml: {} + affinity: + node: {} + pod: {} + pod_anti: {} + custom_dashboards: + excludes: [''] + includes: ['*'] + hpa: + api_version: "autoscaling/v2beta2" + spec: {} + repository: rancher/mirrored-kiali-kiali + image_pull_policy: "Always" + image_pull_secrets: [] + tag: v1.32.0 + ingress_enabled: true + logger: + log_format: "text" + log_level: "info" + time_field_format: "2006-01-02T15:04:05Z07:00" + sampler_rate: "1" + node_selector: {} + override_ingress_yaml: + metadata: {} + pod_annotations: {} + pod_labels: {} + priority_class_name: "" + replicas: 1 + resources: {} + secret_name: "kiali" + service_annotations: {} + service_type: "" + tolerations: [] + version_label: v1.32.0 + view_only_mode: false + +external_services: + custom_dashboards: + enabled: true + +identity: {} + #cert_file: + #private_key_file: + +login_token: + signing_key: "" + +server: + port: 20001 + metrics_enabled: true + metrics_port: 9090 + web_root: "" + +# Common settings used among istio subcharts. +global: + # Specify rancher clusterId of external tracing config + # https://github.com/istio/istio.io/issues/4146#issuecomment-493543032 + cattle: + systemDefaultRegistry: "" + clusterId: + rbac: + pspEnabled: false diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/.helmignore b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/.helmignore new file mode 100755 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/Chart.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/Chart.yaml new file mode 100755 index 000000000..6e368616d --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/Chart.yaml @@ -0,0 +1,12 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: istio-system + catalog.rancher.io/release-name: rancher-tracing +apiVersion: v1 +appVersion: 1.20.0 +description: A quick start Jaeger Tracing installation using the all-in-one demo. + This is not production qualified. Refer to https://www.jaegertracing.io/ for details. +name: tracing +version: 1.20.1 diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/README.md b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/README.md new file mode 100755 index 000000000..25534c628 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/README.md @@ -0,0 +1,5 @@ +# Jaeger + +A Rancher chart based on the Jaeger all-in-one quick installation option. This chart will allow you to trace and monitor distributed microservices. + +> **Note:** The basic all-in-one Jaeger installation which is not qualified for production. Use the [Jaeger Tracing](https://www.jaegertracing.io) documentation to determine which installation you will need for your production needs. 
diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/_affinity.tpl b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/_affinity.tpl new file mode 100755 index 000000000..bf6a9aee5 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/_affinity.tpl @@ -0,0 +1,92 @@ +{{/* affinity - https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ */}} +{{- define "nodeAffinity" }} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + {{- include "nodeAffinityRequiredDuringScheduling" . }} + preferredDuringSchedulingIgnoredDuringExecution: + {{- include "nodeAffinityPreferredDuringScheduling" . }} +{{- end }} + +{{- define "nodeAffinityRequiredDuringScheduling" }} + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + {{- range $key, $val := .Values.global.arch }} + {{- if gt ($val | int) 0 }} + - {{ $key | quote }} + {{- end }} + {{- end }} + {{- $nodeSelector := default .Values.global.defaultNodeSelector .Values.nodeSelector -}} + {{- range $key, $val := $nodeSelector }} + - key: {{ $key }} + operator: In + values: + - {{ $val | quote }} + {{- end }} +{{- end }} + +{{- define "nodeAffinityPreferredDuringScheduling" }} + {{- range $key, $val := .Values.global.arch }} + {{- if gt ($val | int) 0 }} + - weight: {{ $val | int }} + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - {{ $key | quote }} + {{- end }} + {{- end }} +{{- end }} + +{{- define "podAntiAffinity" }} +{{- if or .Values.podAntiAffinityLabelSelector .Values.podAntiAffinityTermLabelSelector}} + podAntiAffinity: + {{- if .Values.podAntiAffinityLabelSelector }} + requiredDuringSchedulingIgnoredDuringExecution: + {{- include "podAntiAffinityRequiredDuringScheduling" . 
}} + {{- end }} + {{- if or .Values.podAntiAffinityTermLabelSelector}} + preferredDuringSchedulingIgnoredDuringExecution: + {{- include "podAntiAffinityPreferredDuringScheduling" . }} + {{- end }} +{{- end }} +{{- end }} + +{{- define "podAntiAffinityRequiredDuringScheduling" }} + {{- range $index, $item := .Values.podAntiAffinityLabelSelector }} + - labelSelector: + matchExpressions: + - key: {{ $item.key }} + operator: {{ $item.operator }} + {{- if $item.values }} + values: + {{- $vals := split "," $item.values }} + {{- range $i, $v := $vals }} + - {{ $v | quote }} + {{- end }} + {{- end }} + topologyKey: {{ $item.topologyKey }} + {{- end }} +{{- end }} + +{{- define "podAntiAffinityPreferredDuringScheduling" }} + {{- range $index, $item := .Values.podAntiAffinityTermLabelSelector }} + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: {{ $item.key }} + operator: {{ $item.operator }} + {{- if $item.values }} + values: + {{- $vals := split "," $item.values }} + {{- range $i, $v := $vals }} + - {{ $v | quote }} + {{- end }} + {{- end }} + topologyKey: {{ $item.topologyKey }} + weight: 100 + {{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/_helpers.tpl b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/_helpers.tpl new file mode 100755 index 000000000..56cfa7335 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "tracing.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "tracing.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/deployment.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/deployment.yaml new file mode 100755 index 000000000..25bb67fd3 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/deployment.yaml @@ -0,0 +1,86 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "tracing.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + app: {{ .Values.provider }} + template: + metadata: + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + sidecar.istio.io/inject: "false" + prometheus.io/scrape: "true" + prometheus.io/port: "14269" +{{- if .Values.jaeger.podAnnotations }} +{{ toYaml .Values.jaeger.podAnnotations | indent 8 }} +{{- end }} + spec: + containers: + - name: jaeger + image: "{{ template "system_default_registry" . 
}}{{ .Values.jaeger.repository }}:{{ .Values.jaeger.tag }}" + imagePullPolicy: {{ .Values.global.imagePullPolicy }} + env: + {{- if eq .Values.jaeger.spanStorageType "badger" }} + - name: BADGER_EPHEMERAL + value: "false" + - name: SPAN_STORAGE_TYPE + value: "badger" + - name: BADGER_DIRECTORY_VALUE + value: "/badger/data" + - name: BADGER_DIRECTORY_KEY + value: "/badger/key" + {{- end }} + - name: COLLECTOR_ZIPKIN_HTTP_PORT + value: "9411" + - name: MEMORY_MAX_TRACES + value: "{{ .Values.jaeger.memory.max_traces }}" + - name: QUERY_BASE_PATH + value: {{ if .Values.contextPath }} {{ .Values.contextPath }} {{ else }} /{{ .Values.provider }} {{ end }} + livenessProbe: + httpGet: + path: / + port: 14269 + readinessProbe: + httpGet: + path: / + port: 14269 +{{- if eq .Values.jaeger.spanStorageType "badger" }} + volumeMounts: + - name: data + mountPath: /badger +{{- end }} + resources: +{{- if .Values.jaeger.resources }} +{{ toYaml .Values.jaeger.resources | indent 12 }} +{{- else }} +{{ toYaml .Values.global.defaultResources | indent 12 }} +{{- end }} + affinity: + {{- include "nodeAffinity" . | indent 6 }} + {{- include "podAntiAffinity" . | indent 6 }} + {{- if .Values.global.rbac.pspEnabled }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + serviceAccountName: {{ include "tracing.fullname" . 
}} + {{- end }} +{{- if eq .Values.jaeger.spanStorageType "badger" }} + volumes: + - name: data +{{- if .Values.jaeger.persistentVolumeClaim.enabled }} + persistentVolumeClaim: + claimName: istio-jaeger-pvc +{{- else }} + emptyDir: {} +{{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/psp.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/psp.yaml new file mode 100755 index 000000000..44b230492 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/psp.yaml @@ -0,0 +1,86 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "tracing.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "tracing.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "tracing.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "tracing.fullname" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "tracing.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: +- apiGroups: + - policy + resourceNames: + - {{ include "tracing.fullname" . }} + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "tracing.fullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + forbiddenSysctls: + - '*' + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + runAsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - emptyDir + - secret + - persistentVolumeClaim +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/pvc.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/pvc.yaml new file mode 100755 index 000000000..9b4c55e4f --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/pvc.yaml @@ -0,0 +1,16 @@ +{{- if .Values.jaeger.persistentVolumeClaim.enabled }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: istio-jaeger-pvc + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} +spec: + storageClassName: {{ .Values.jaeger.storageClassName }} + accessModes: + - {{ .Values.jaeger.accessMode }} + resources: + requests: + storage: {{.Values.jaeger.persistentVolumeClaim.storage }} +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/service.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/service.yaml new file mode 100755 index 000000000..4210a9b5f --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/templates/service.yaml @@ -0,0 +1,63 @@ +apiVersion: v1 +kind: Service +metadata: + name: tracing + namespace: {{ .Release.Namespace }} + annotations: + {{- range $key, $val := .Values.service.annotations }} + {{ $key }}: {{ $val | quote }} + {{- end }} + labels: + app: {{ .Values.provider 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + type: {{ .Values.service.type }} + ports: + - name: {{ .Values.service.name }} + port: {{ .Values.service.externalPort }} + protocol: TCP + targetPort: 16686 + selector: + app: {{ .Values.provider }} +--- +# Jaeger implements the Zipkin API. To support swapping out the tracing backend, we use a Service named Zipkin. +apiVersion: v1 +kind: Service +metadata: + name: zipkin + namespace: {{ .Release.Namespace }} + labels: + name: zipkin + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + ports: + - name: {{ .Values.service.name }} + port: {{ .Values.zipkin.queryPort }} + targetPort: {{ .Values.zipkin.queryPort }} + selector: + app: {{ .Values.provider }} +--- +apiVersion: v1 +kind: Service +metadata: + name: jaeger-collector + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + type: ClusterIP + ports: + - name: jaeger-collector-http + port: 14268 + targetPort: 14268 + protocol: TCP + - name: jaeger-collector-grpc + port: 14250 + targetPort: 14250 + protocol: TCP + selector: + app: {{ .Values.provider }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/values.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/values.yaml new file mode 100755 index 000000000..18ff81c3c --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/charts/tracing/values.yaml @@ -0,0 +1,44 @@ +provider: jaeger +contextPath: "" +nodeSelector: {} +podAntiAffinityLabelSelector: [] +podAntiAffinityTermLabelSelector: [] +nameOverride: "" +fullnameOverride: "" + +global: + cattle: + systemDefaultRegistry: "" + defaultResources: {} + imagePullPolicy: IfNotPresent + imagePullSecrets: [] + arch: + amd64: 2 + s390x: 2 + ppc64le: 2 + defaultNodeSelector: {} + rbac: + pspEnabled: false + +jaeger: + repository: rancher/mirrored-jaegertracing-all-in-one + 
tag: 1.20.0 + # spanStorageType value can be "memory" and "badger" for all-in-one image + spanStorageType: badger + resources: + requests: + cpu: 10m + persistentVolumeClaim: + enabled: false + storage: 5Gi + storageClassName: "" + accessMode: ReadWriteMany + memory: + max_traces: 50000 +zipkin: + queryPort: 9411 +service: + annotations: {} + name: http-query + type: ClusterIP + externalPort: 16686 diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/configs/istio-base.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/configs/istio-base.yaml new file mode 100755 index 000000000..7ff972e2d --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/configs/istio-base.yaml @@ -0,0 +1,89 @@ +apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +spec: + addonComponents: + istiocoredns: + enabled: {{ .Values.istiocoredns.enabled }} + components: + base: + enabled: {{ .Values.base.enabled }} + cni: + enabled: {{ .Values.cni.enabled }} + egressGateways: + - enabled: {{ .Values.egressGateways.enabled }} + name: istio-egressgateway + ingressGateways: + - enabled: {{ .Values.ingressGateways.enabled }} + name: istio-ingressgateway + k8s: + service: + ports: + - name: status-port + port: 15021 + targetPort: 15021 + - name: http2 + port: 80 + targetPort: 8080 + nodePort: 31380 + - name: https + port: 443 + targetPort: 8443 + nodePort: 31390 + - name: tcp + port: 31400 + targetPort: 31400 + nodePort: 31400 + - name: tls + port: 15443 + targetPort: 15443 + istiodRemote: + enabled: {{ .Values.istiodRemote.enabled }} + pilot: + enabled: {{ .Values.pilot.enabled }} + hub: {{ .Values.systemDefaultRegistry | default "docker.io" }} + profile: default + tag: {{ .Values.tag }} + revision: {{ .Values.revision }} + meshConfig: + defaultConfig: + proxyMetadata: + {{- if .Values.dns.enabled }} + ISTIO_META_DNS_CAPTURE: "true" + {{- end }} + values: + gateways: + istio-egressgateway: + name: istio-egressgateway + type: {{ .Values.egressGateways.type }} + 
istio-ingressgateway: + name: istio-ingressgateway + type: {{ .Values.ingressGateways.type }} + global: + istioNamespace: {{ template "istio.namespace" . }} + proxy: + image: {{ template "system_default_registry" . }}{{ .Values.global.proxy.repository }}:{{ .Values.global.proxy.tag }} + proxy_init: + image: {{ template "system_default_registry" . }}{{ .Values.global.proxy_init.repository }}:{{ .Values.global.proxy_init.tag }} + {{- if .Values.global.defaultPodDisruptionBudget.enabled }} + defaultPodDisruptionBudget: + enabled: {{ .Values.global.defaultPodDisruptionBudget.enabled }} + {{- end }} + istiocoredns: + coreDNSImage: {{ template "system_default_registry" . }}{{ .Values.istiocoredns.image.repository }} + coreDNSPluginImage: {{ template "system_default_registry" . }}{{ .Values.istiocoredns.pluginImage.repository }}:{{ .Values.istiocoredns.pluginImage.tag }} + coreDNSTag: {{ .Values.istiocoredns.image.tag }} + {{- if .Values.pilot.enabled }} + pilot: + image: {{ template "system_default_registry" . }}{{ .Values.pilot.repository }}:{{ .Values.pilot.tag }} + {{- end }} + telemetry: + enabled: {{ .Values.telemetry.enabled }} + v2: + enabled: {{ .Values.telemetry.v2.enabled }} + {{- if .Values.cni.enabled }} + cni: + image: {{ template "system_default_registry" . 
}}{{ .Values.cni.repository }}:{{ .Values.cni.tag }} + excludeNamespaces: + {{- toYaml .Values.cni.excludeNamespaces | nindent 8 }} + logLevel: {{ .Values.cni.logLevel }} + {{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/requirements.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/requirements.yaml new file mode 100755 index 000000000..b60745780 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/requirements.yaml @@ -0,0 +1,17 @@ +dependencies: +- name: kiali + version: "" + repository: file://./charts/kiali + condition: kiali.enabled + tags: [] + enabled: false + importvalues: [] + alias: "" +- name: tracing + version: "" + repository: file://./charts/tracing + condition: tracing.enabled + tags: [] + enabled: false + importvalues: [] + alias: "" diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/samples/overlay-example.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/samples/overlay-example.yaml new file mode 100755 index 000000000..5cf3cf3b0 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/samples/overlay-example.yaml @@ -0,0 +1,37 @@ +apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +spec: + components: + ingressGateways: + - enabled: true + name: ilb-gateway + namespace: user-ingressgateway-ns + k8s: + resources: + requests: + cpu: 200m + service: + ports: + - name: tcp-citadel-grpc-tls + port: 8060 + targetPort: 8060 + - name: tcp-dns + port: 5353 + serviceAnnotations: + cloud.google.com/load-balancer-type: internal + - enabled: true + name: other-gateway + namespace: cattle-istio-system + k8s: + resources: + requests: + cpu: 200m + service: + ports: + - name: tcp-citadel-grpc-tls + port: 8060 + targetPort: 8060 + - name: tcp-dns + port: 5353 + serviceAnnotations: + cloud.google.com/load-balancer-type: internal diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/_helpers.tpl b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/_helpers.tpl new file 
mode 100755 index 000000000..3f7af953a --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/_helpers.tpl @@ -0,0 +1,12 @@ +{{/* Ensure namespace is set the same everywhere */}} +{{- define "istio.namespace" -}} + {{- .Release.Namespace | default "istio-system" -}} +{{- end -}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/admin-role.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/admin-role.yaml new file mode 100755 index 000000000..ad1313c4f --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/admin-role.yaml @@ -0,0 +1,43 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + name: istio-admin + namespace: {{ template "istio.namespace" . 
}} +rules: + - apiGroups: + - config.istio.io + resources: + - adapters + - attributemanifests + - handlers + - httpapispecbindings + - httpapispecs + - instances + - quotaspecbindings + - quotaspecs + - rules + - templates + verbs: ["get", "watch", "list"] + - apiGroups: + - networking.istio.io + resources: + - destinationrules + - envoyfilters + - gateways + - serviceentries + - sidecars + - virtualservices + - workloadentries + verbs: + - '*' + - apiGroups: + - security.istio.io + resources: + - authorizationpolicies + - peerauthentications + - requestauthentications + verbs: + - '*' diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/base-config-map.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/base-config-map.yaml new file mode 100755 index 000000000..5323917bc --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/base-config-map.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-installer-base + namespace: {{ template "istio.namespace" . }} +data: +{{ tpl (.Files.Glob "configs/*").AsConfig . 
| indent 2 }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/clusterrole.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/clusterrole.yaml new file mode 100755 index 000000000..a93b3df95 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/clusterrole.yaml @@ -0,0 +1,120 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-installer +rules: +# istio groups +- apiGroups: + - authentication.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - config.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - install.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - networking.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - rbac.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - security.istio.io + resources: + - '*' + verbs: + - '*' +# k8s groups +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions.apiextensions.k8s.io + - customresourcedefinitions + verbs: + - '*' +- apiGroups: + - apps + - extensions + resources: + - daemonsets + - deployments + - deployments/finalizers + - ingresses + - replicasets + - statefulsets + verbs: + - '*' +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - '*' +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - roles + - rolebindings + verbs: + - '*' +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - events + - namespaces + - pods + - pods/exec + - persistentvolumeclaims + - secrets + - services + - serviceaccounts + verbs: + - '*' 
+- apiGroups: + - policy + resourceNames: + - istio-installer + resources: + - podsecuritypolicies + verbs: + - use diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/clusterrolebinding.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/clusterrolebinding.yaml new file mode 100755 index 000000000..9d74a0434 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: istio-installer +subjects: +- kind: ServiceAccount + name: istio-installer + namespace: {{ template "istio.namespace" . }} +roleRef: + kind: ClusterRole + name: istio-installer + apiGroup: rbac.authorization.k8s.io diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/edit-role.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/edit-role.yaml new file mode 100755 index 000000000..d1059d58d --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/edit-role.yaml @@ -0,0 +1,43 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + namespace: {{ template "istio.namespace" . 
}} + name: istio-edit +rules: + - apiGroups: + - config.istio.io + resources: + - adapters + - attributemanifests + - handlers + - httpapispecbindings + - httpapispecs + - instances + - quotaspecbindings + - quotaspecs + - rules + - templates + verbs: ["get", "watch", "list"] + - apiGroups: + - networking.istio.io + resources: + - destinationrules + - envoyfilters + - gateways + - serviceentries + - sidecars + - virtualservices + - workloadentries + verbs: + - '*' + - apiGroups: + - security.istio.io + resources: + - authorizationpolicies + - peerauthentications + - requestauthentications + verbs: + - '*' diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-cni-psp.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-cni-psp.yaml new file mode 100755 index 000000000..5b94c8503 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-cni-psp.yaml @@ -0,0 +1,51 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: psp-istio-cni + namespace: {{ template "istio.namespace" . }} +spec: + allowPrivilegeEscalation: true + fsGroup: + rule: RunAsAny + hostNetwork: true + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - secret + - configMap + - emptyDir + - hostPath +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: psp-istio-cni + namespace: {{ template "istio.namespace" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: psp-istio-cni +subjects: + - kind: ServiceAccount + name: istio-cni +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: psp-istio-cni + namespace: {{ template "istio.namespace" . 
}} +rules: +- apiGroups: + - policy + resourceNames: + - psp-istio-cni + resources: + - podsecuritypolicies + verbs: + - use +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-install-job.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-install-job.yaml new file mode 100755 index 000000000..9a13f5698 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-install-job.yaml @@ -0,0 +1,50 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: istioctl-installer + namespace: {{ template "istio.namespace" . }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +spec: + backoffLimit: 1 + template: + spec: + containers: + - name: istioctl-installer + image: {{ template "system_default_registry" . }}{{ .Values.installer.repository }}:{{ .Values.installer.tag }} + env: + - name: RELEASE_NAME + value: {{ .Release.Name }} + - name: ISTIO_NAMESPACE + value: {{ template "istio.namespace" . 
}} + - name: FORCE_INSTALL + value: {{ .Values.forceInstall | default "false" | quote }} + command: ["/bin/sh","-c"] + args: ["/usr/local/app/scripts/run.sh"] + volumeMounts: + - name: config-volume + mountPath: /app/istio-base.yaml + subPath: istio-base.yaml + {{- if .Values.overlayFile }} + - name: overlay-volume + mountPath: /app/overlay-config.yaml + subPath: overlay-config.yaml + {{- end }} + volumes: + - name: config-volume + configMap: + name: istio-installer-base + {{- if .Values.overlayFile }} + - name: overlay-volume + configMap: + name: istio-installer-overlay + {{- end }} + serviceAccountName: istio-installer + {{- if .Values.global.rbac.pspEnabled }} + securityContext: + runAsUser: 101 + runAsGroup: 101 + {{- end }} + restartPolicy: Never diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-install-psp.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-install-psp.yaml new file mode 100755 index 000000000..f0b5ee565 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-install-psp.yaml @@ -0,0 +1,30 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: istio-installer + namespace: {{ template "istio.namespace" . 
}} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'configMap' + - 'secret' +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-psp.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-psp.yaml new file mode 100755 index 000000000..b3758b74f --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-psp.yaml @@ -0,0 +1,81 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: istio-psp + namespace: {{ template "istio.namespace" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: istio-psp +subjects: + - kind: ServiceAccount + name: istio-egressgateway-service-account + - kind: ServiceAccount + name: istio-ingressgateway-service-account + - kind: ServiceAccount + name: istio-mixer-service-account + - kind: ServiceAccount + name: istio-operator-authproxy + - kind: ServiceAccount + name: istiod-service-account + - kind: ServiceAccount + name: istio-sidecar-injector-service-account + - kind: ServiceAccount + name: istiocoredns-service-account + - kind: ServiceAccount + name: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: istio-psp + namespace: {{ template "istio.namespace" . }} +rules: +- apiGroups: + - policy + resourceNames: + - istio-psp + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: istio-psp + namespace: {{ template "istio.namespace" . 
}} +spec: + allowPrivilegeEscalation: false + forbiddenSysctls: + - '*' + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + runAsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI + - persistentVolumeClaim +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-uninstall-job.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-uninstall-job.yaml new file mode 100755 index 000000000..a7f156325 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/istio-uninstall-job.yaml @@ -0,0 +1,45 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: istioctl-uninstaller + namespace: {{ template "istio.namespace" . }} + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + template: + spec: + containers: + - name: istioctl-uninstaller + image: {{ template "system_default_registry" . }}{{ .Values.installer.repository }}:{{ .Values.installer.tag }} + env: + - name: RELEASE_NAME + value: {{ .Release.Name }} + - name: ISTIO_NAMESPACE + value: {{ template "istio.namespace" . 
}} + command: ["/bin/sh","-c"] + args: ["/usr/local/app/scripts/uninstall_istio_system.sh"] + volumeMounts: + - name: config-volume + mountPath: /app/istio-base.yaml + subPath: istio-base.yaml + {{- if .Values.overlayFile }} + - name: overlay-volume + mountPath: /app/overlay-config.yaml + subPath: overlay-config.yaml + {{ end }} + volumes: + - name: config-volume + configMap: + name: istio-installer-base + {{- if .Values.overlayFile }} + - name: overlay-volume + configMap: + name: istio-installer-overlay + {{ end }} + serviceAccountName: istio-installer + securityContext: + runAsUser: 101 + runAsGroup: 101 + restartPolicy: OnFailure diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/overlay-config-map.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/overlay-config-map.yaml new file mode 100755 index 000000000..287d26b2c --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/overlay-config-map.yaml @@ -0,0 +1,9 @@ +{{- if .Values.overlayFile }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-installer-overlay + namespace: {{ template "istio.namespace" . }} +data: + overlay-config.yaml: {{ toYaml .Values.overlayFile | indent 2 }} +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/service-monitors.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/service-monitors.yaml new file mode 100755 index 000000000..c3d60c4fc --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/service-monitors.yaml @@ -0,0 +1,51 @@ +{{- if .Values.kiali.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: envoy-stats-monitor + namespace: {{ template "istio.namespace" . 
}} + labels: + monitoring: istio-proxies +spec: + selector: + matchExpressions: + - {key: istio-prometheus-ignore, operator: DoesNotExist} + namespaceSelector: + any: true + jobLabel: envoy-stats + endpoints: + - path: /stats/prometheus + targetPort: 15090 + interval: 15s + relabelings: + - sourceLabels: [__meta_kubernetes_pod_container_port_name] + action: keep + regex: '.*-envoy-prom' + - action: labeldrop + regex: "__meta_kubernetes_pod_label_(.+)" + - sourceLabels: [__meta_kubernetes_namespace] + action: replace + targetLabel: namespace + - sourceLabels: [__meta_kubernetes_pod_name] + action: replace + targetLabel: pod_name +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: istio-component-monitor + namespace: {{ template "istio.namespace" . }} + labels: + monitoring: istio-components +spec: + jobLabel: istio + targetLabels: [app] + selector: + matchExpressions: + - {key: istio, operator: In, values: [pilot]} + namespaceSelector: + any: true + endpoints: + - port: http-monitoring + interval: 15s +{{- end -}} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/serviceaccount.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/serviceaccount.yaml new file mode 100755 index 000000000..82b6cbb7e --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-installer + namespace: {{ template "istio.namespace" . 
}} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/view-role.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/view-role.yaml new file mode 100755 index 000000000..5947d3eba --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/templates/view-role.yaml @@ -0,0 +1,41 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + namespace: {{ template "istio.namespace" . }} + name: istio-view +rules: + - apiGroups: + - config.istio.io + resources: + - adapters + - attributemanifests + - handlers + - httpapispecbindings + - httpapispecs + - instances + - quotaspecbindings + - quotaspecs + - rules + - templates + verbs: ["get", "watch", "list"] + - apiGroups: + - networking.istio.io + resources: + - destinationrules + - envoyfilters + - gateways + - serviceentries + - sidecars + - virtualservices + - workloadentries + verbs: ["get", "watch", "list"] + - apiGroups: + - security.istio.io + resources: + - authorizationpolicies + - peerauthentications + - requestauthentications + verbs: ["get", "watch", "list"] diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.500/values.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.500/values.yaml new file mode 100755 index 000000000..8b97b7045 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.500/values.yaml @@ -0,0 +1,95 @@ +overlayFile: "" +tag: 1.8.5 +##Setting forceInstall: true will remove the check for istio version < 1.6.x and will not analyze your install cluster prior to install +forceInstall: false + +installer: + repository: rancher/istio-installer + tag: 1.8.5-rancher1 + +##Deprecated as of 1.8 and unsupported in 1.9, native support provided by enabling `dns.enabled=true` +istiocoredns: + enabled: false + image: + repository: rancher/mirrored-coredns-coredns + tag: 1.6.2 + pluginImage: + repository: rancher/mirrored-istio-coredns-plugin + tag: 0.2-istio-1.1 + +##Native 
support for dns added in 1.8 +dns: + enabled: false + +base: + enabled: true + +cni: + enabled: false + repository: rancher/mirrored-istio-install-cni + tag: 1.8.5 + logLevel: info + excludeNamespaces: + - istio-system + - kube-system + +egressGateways: + enabled: false + type: NodePort + +ingressGateways: + enabled: true + type: NodePort + +istiodRemote: + enabled: false + +pilot: + enabled: true + repository: rancher/mirrored-istio-pilot + tag: 1.8.5 + +telemetry: + enabled: true + v2: + enabled: true + +global: + cattle: + systemDefaultRegistry: "" + proxy: + repository: rancher/mirrored-istio-proxyv2 + tag: 1.8.5 + proxy_init: + repository: rancher/mirrored-istio-proxyv2 + tag: 1.8.5 + defaultPodDisruptionBudget: + enabled: true + rbac: + pspEnabled: false + +# Kiali subchart from rancher-kiali-server +kiali: + enabled: true + auth: + strategy: anonymous + deployment: + ingress_enabled: false + repository: rancher/mirrored-kiali-kiali + tag: v1.32.0 + external_services: + prometheus: + custom_metrics_url: "http://rancher-monitoring-prometheus.cattle-monitoring-system.svc:9090" + url: "http://rancher-monitoring-prometheus.cattle-monitoring-system.svc:9090" + tracing: + in_cluster_url: "http://tracing.istio-system.svc:16686/jaeger" + grafana: + in_cluster_url: "http://rancher-monitoring-grafana.cattle-monitoring-system.svc:80" + url: "http://rancher-monitoring-grafana.cattle-monitoring-system.svc:80" + +tracing: + enabled: false + contextPath: "/jaeger" + jaeger: + repository: rancher/mirrored-jaegertracing-all-in-one + tag: 1.20.0 diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/Chart.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/Chart.yaml new file mode 100755 index 000000000..2068b58de --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/Chart.yaml @@ -0,0 +1,21 @@ +annotations: + catalog.cattle.io/auto-install: rancher-kiali-server-crd=1.32.100 + catalog.cattle.io/certified: rancher + catalog.cattle.io/display-name: Istio + 
catalog.cattle.io/namespace: istio-system + catalog.cattle.io/os: linux + catalog.cattle.io/provides-gvr: networking.istio.io.virtualservice/v1beta1 + catalog.cattle.io/release-name: rancher-istio + catalog.cattle.io/requests-cpu: 710m + catalog.cattle.io/requests-memory: 2314Mi + catalog.cattle.io/ui-component: istio +apiVersion: v1 +appVersion: 1.8.6 +description: A basic Istio setup that installs with the istioctl. Refer to https://istio.io/latest/ + for details. +icon: https://charts.rancher.io/assets/logos/istio.svg +keywords: +- networking +- infrastructure +name: rancher-istio +version: 1.8.600 diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/README.md b/charts/rancher-istio-1.8/rancher-istio/1.8.600/README.md new file mode 100755 index 000000000..199e45312 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/README.md @@ -0,0 +1,69 @@ +# Rancher Istio Installers + +A Rancher created chart that packages the istioctl binary to install via a helm chart. + +# Installation Requirements + +## Chart Dependencies +- rancher-kiali-server-crd chart + +# Uninstallation Requirements +To ensure rancher-istio uninstalls correctly, you must uninstall rancher-istio prior to uninstalling chart dependencies (see installation requirements for chart dependencies). This is because all definitions need to be available in order to properly build the rancher-istio objects for removal. + +If you remove dependent CRD charts prior to removing rancher-istio, you may encounter the following error: + +`Error: uninstallation completed with 1 error(s): unable to build kubernetes objects for delete: unable to recognize "": no matches for kind "MonitoringDashboard" in version "monitoring.kiali.io/v1alpha1"` + +# Addons + +## Kiali + +Kiali allows you to view and manage your istio-based service mesh through an easy to use dashboard. 
+ +#### Dependencies +- rancher-monitoring chart or other Prometheus installation + +This dependency installs the required CRDs for installing Kiali. Since Kiali is bundled in with Istio in this chart, if you do not have these dependencies installed, your Istio installation will fail. If you do not plan on using Kiali, set `kiali.enabled=false` when installing Istio for a successful installation. + +> **Note:** The following configuration options assume you have installed the dependencies for Kiali. Please ensure you have Prometheus in your cluster before proceeding. + +The Monitoring app sets `prometheus.prometheusSpec.ignoreNamespaceSelectors=false` which means all namespaces will be scraped by Prometheus by default. This ensures you can view traffic, metrics and graphs for resources deployed in other namespaces. + +To limit scraping to specific namespaces, set `prometheus.prometheusSpec.ignoreNamespaceSelectors=true` and add one of the following configurations to ensure you can continue to view traffic, metrics and graphs for your deployed resources. + +1. Add a Service Monitor or Pod Monitor in the namespace with the targets you want to scrape. +1. Add an additionalScrapeConfig to your rancher-monitoring instance to scrape all targets in all namespaces. + +#### External Services + +##### Prometheus +The `kiali.external_services.prometheus` url is set in the values.yaml: +``` +http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc:{{ prometheus.service.port }} +``` +The url depends on the default values for `nameOverride`, `namespaceOverride`, and `prometheus.service.port` being set in your rancher-monitoring or other monitoring instance. 
+ +##### Grafana +The `kiali.external_services.grafana` url is set in the values.yaml: +``` +http://{{ .Values.nameOverride }}-grafana.{{ .Values.namespaceOverride }}.svc:{{ grafana.service.port }} +``` +The url depends on the default values for `nameOverride`, `namespaceOverride`, and `grafana.service.port` being set in your rancher-monitoring or other monitoring instance. + +##### Tracing +The `kiali.external_services.tracing` url and `.Values.tracing.contextPath` is set in the rancher-istio values.yaml: +``` +http://tracing.{{ .Values.namespaceOverride }}.svc:{{ .Values.service.externalPort }}/{{ .Values.tracing.contextPath }} +``` +The url depends on the default values for `namespaceOverride`, and `.Values.service.externalPort` being set in your rancher-tracing or other tracing instance. + +## Jaeger + +Jaeger allows you to trace and monitor distributed microservices. + +> **Note:** This addon is using the all-in-one Jaeger installation which is not qualified for production. Use the [Jaeger Tracing](https://www.jaegertracing.io/docs/1.21/getting-started/) documentation to determine which installation you will need for your production needs. + +# Installation +``` +helm install rancher-istio . --create-namespace -n istio-system +``` \ No newline at end of file diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/app-readme.md b/charts/rancher-istio-1.8/rancher-istio/1.8.600/app-readme.md new file mode 100755 index 000000000..0e42df083 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/app-readme.md @@ -0,0 +1,45 @@ +# Rancher Istio + +Our [Istio](https://istio.io/) installer wraps the istioctl binary commands in a handy helm chart, including an overlay file option to allow complex customization. It also includes: +* **[Kiali](https://kiali.io/)**: Used for graphing traffic flow throughout the mesh +* **[Jaeger](https://www.jaegertracing.io/)**: A quick start, all-in-one installation used for tracing distributed systems. 
This is not production qualified, please refer to jaeger documentation to determine which installation you may need instead. + +### Dependencies + +**Rancher Monitoring or other Prometheus installation** + +The Prometheus CRDs are required for installing Kiali which is enabled by default. If you do not have Prometheus installed your Istio installation will fail. If you do not plan on using Kiali, set `kiali.enabled=false` to bypass this requirement. + +### Customization + +**Rancher Monitoring** + +The Rancher Monitoring app sets `prometheus.prometheusSpec.ignoreNamespaceSelectors=false` which means all namespaces will be scraped by Prometheus by default. This ensures you can view traffic, metrics and graphs for resources deployed in other namespaces. + +To limit scraping to specific namespaces, set `prometheus.prometheusSpec.ignoreNamespaceSelectors=true` and add one of the following configurations to ensure you can continue to view traffic, metrics and graphs for your deployed resources. + +1. Add a Service Monitor or Pod Monitor in the namespace with the targets you want to scrape. +1. Add an additionalScrapeConfig to your rancher-monitoring instance to scrape all targets in all namespaces. + +**Custom Prometheus Installation with Kiali** + +To use a custom Monitoring installation, set the `kiali.external_services.prometheus` url in the values.yaml. This url depends on the values for `nameOverride`, `namespaceOverride`, and `prometheus.service.port` in your rancher-monitoring or other monitoring instance: +``` +http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc:{{ prometheus.service.port }} +``` +**Custom Grafana Installation with Kiali** + +To use a custom Grafana installation, set the `kiali.external_services.grafana` url in the values.yaml. 
This url depends on the values for `nameOverride`, `namespaceOverride`, and `grafana.service.port` in your rancher-monitoring or other grafana instance: +``` +http://{{ .Values.nameOverride }}-grafana.{{ .Values.namespaceOverride }}.svc:{{ grafana.service.port }} +``` +**Custom Tracing Installation with Kiali** + +To use a custom Tracing installation, set the `kiali.external_services.tracing` url and update the `.Values.tracing.contextPath` in the rancher-istio values.yaml. + +This url depends on the values for `namespaceOverride`, and `.Values.service.externalPort` in your rancher-tracing or other tracing instance: +``` +http://tracing.{{ .Values.namespaceOverride }}.svc:{{ .Values.service.externalPort }}/{{ .Values.tracing.contextPath }} +``` + +For more information on how to use the feature, refer to our [docs](https://rancher.com/docs/rancher/v2.x/en/istio/v2.5/). diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/Chart.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/Chart.yaml new file mode 100755 index 000000000..9b6fdf385 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/Chart.yaml @@ -0,0 +1,31 @@ +annotations: + catalog.cattle.io/auto-install: rancher-kiali-server-crd=match + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.cattle.io/provides-gvr: monitoringdashboards.monitoring.kiali.io/v1alpha1 + catalog.cattle.io/requires-gvr: monitoring.coreos.com.prometheus/v1 + catalog.rancher.io/namespace: cattle-istio-system + catalog.rancher.io/release-name: rancher-kiali-server +apiVersion: v2 +appVersion: v1.32.0 +description: Kiali is an open source project for service mesh observability, refer + to https://www.kiali.io for details. This is installed as sub-chart with customized + values in Rancher's Istio. 
+home: https://github.com/kiali/kiali +icon: https://raw.githubusercontent.com/kiali/kiali.io/master/themes/kiali/static/img/kiali_logo_masthead.png +keywords: +- istio +- kiali +- networking +- infrastructure +maintainers: +- email: kiali-users@googlegroups.com + name: Kiali + url: https://kiali.io +name: kiali +sources: +- https://github.com/kiali/kiali +- https://github.com/kiali/kiali-ui +- https://github.com/kiali/kiali-operator +- https://github.com/kiali/helm-charts +version: 1.32.1 diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/NOTES.txt b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/NOTES.txt new file mode 100755 index 000000000..751019401 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/NOTES.txt @@ -0,0 +1,5 @@ +Welcome to Kiali! For more details on Kiali, see: https://kiali.io + +The Kiali Server [{{ .Chart.AppVersion }}] has been installed in namespace [{{ .Release.Namespace }}]. It will be ready soon. + +(Helm: Chart=[{{ .Chart.Name }}], Release=[{{ .Release.Name }}], Version=[{{ .Chart.Version }}]) diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/_helpers.tpl b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/_helpers.tpl new file mode 100755 index 000000000..dd33bbe48 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/_helpers.tpl @@ -0,0 +1,192 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "kiali-server.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "kiali-server.fullname" -}} +{{- if .Values.fullnameOverride }} + {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} + {{- $name := default .Chart.Name .Values.nameOverride }} + {{- printf "%s" $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kiali-server.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Identifies the log_level with the old verbose_mode and the new log_level considered. +*/}} +{{- define "kiali-server.logLevel" -}} +{{- if .Values.deployment.verbose_mode -}} +{{- .Values.deployment.verbose_mode -}} +{{- else -}} +{{- .Values.deployment.logger.log_level -}} +{{- end -}} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "kiali-server.labels" -}} +helm.sh/chart: {{ include "kiali-server.chart" . }} +app: {{ include "kiali-server.name" . }} +{{ include "kiali-server.selectorLabels" . }} +version: {{ .Values.deployment.version_label | default .Chart.AppVersion | quote }} +app.kubernetes.io/version: {{ .Values.deployment.version_label | default .Chart.AppVersion | quote }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/part-of: "kiali" +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "kiali-server.selectorLabels" -}} +app.kubernetes.io/name: {{ include "kiali-server.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Used to determine if a custom dashboard (defined in .Template.Name) should be deployed. +*/}} +{{- define "kiali-server.isDashboardEnabled" -}} +{{- if .Values.external_services.custom_dashboards.enabled }} + {{- $includere := "" }} + {{- range $_, $s := .Values.deployment.custom_dashboards.includes }} + {{- if $s }} + {{- if $includere }} + {{- $includere = printf "%s|^%s$" $includere ($s | replace "*" ".*" | replace "?" 
".") }} + {{- else }} + {{- $includere = printf "^%s$" ($s | replace "*" ".*" | replace "?" ".") }} + {{- end }} + {{- end }} + {{- end }} + {{- $excludere := "" }} + {{- range $_, $s := .Values.deployment.custom_dashboards.excludes }} + {{- if $s }} + {{- if $excludere }} + {{- $excludere = printf "%s|^%s$" $excludere ($s | replace "*" ".*" | replace "?" ".") }} + {{- else }} + {{- $excludere = printf "^%s$" ($s | replace "*" ".*" | replace "?" ".") }} + {{- end }} + {{- end }} + {{- end }} + {{- if (and (mustRegexMatch (default "no-matches" $includere) (base .Template.Name)) (not (mustRegexMatch (default "no-matches" $excludere) (base .Template.Name)))) }} + {{- print "enabled" }} + {{- else }} + {{- print "" }} + {{- end }} +{{- else }} + {{- print "" }} +{{- end }} +{{- end }} + +{{/* +Determine the default login token signing key. +*/}} +{{- define "kiali-server.login_token.signing_key" -}} +{{- if .Values.login_token.signing_key }} + {{- .Values.login_token.signing_key }} +{{- else }} + {{- randAlphaNum 16 }} +{{- end }} +{{- end }} + +{{/* +Determine the default web root. +*/}} +{{- define "kiali-server.server.web_root" -}} +{{- if .Values.server.web_root }} + {{- .Values.server.web_root | trimSuffix "/" }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- "/" }} + {{- else }} + {{- "/kiali" }} + {{- end }} +{{- end }} +{{- end }} + +{{/* +Determine the default identity cert file. There is no default if on k8s; only on OpenShift. +*/}} +{{- define "kiali-server.identity.cert_file" -}} +{{- if hasKey .Values.identity "cert_file" }} + {{- .Values.identity.cert_file }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- "/kiali-cert/tls.crt" }} + {{- else }} + {{- "" }} + {{- end }} +{{- end }} +{{- end }} + +{{/* +Determine the default identity private key file. There is no default if on k8s; only on OpenShift. 
+*/}} +{{- define "kiali-server.identity.private_key_file" -}} +{{- if hasKey .Values.identity "private_key_file" }} + {{- .Values.identity.private_key_file }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- "/kiali-cert/tls.key" }} + {{- else }} + {{- "" }} + {{- end }} +{{- end }} +{{- end }} + +{{/* +Determine the istio namespace - default is where Kiali is installed. +*/}} +{{- define "kiali-server.istio_namespace" -}} +{{- if .Values.istio_namespace }} + {{- .Values.istio_namespace }} +{{- else }} + {{- .Release.Namespace }} +{{- end }} +{{- end }} + +{{/* +Determine the auth strategy to use - default is "token" on Kubernetes and "openshift" on OpenShift. +*/}} +{{- define "kiali-server.auth.strategy" -}} +{{- if .Values.auth.strategy }} + {{- if (and (eq .Values.auth.strategy "openshift") (not .Values.kiali_route_url)) }} + {{- fail "You did not define what the Kiali Route URL will be (--set kiali_route_url=...). Without this set, the openshift auth strategy will not work. Either set that or use a different auth strategy via the --set auth.strategy=... option." }} + {{- end }} + {{- .Values.auth.strategy }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- if not .Values.kiali_route_url }} + {{- fail "You did not define what the Kiali Route URL will be (--set kiali_route_url=...). Without this set, the openshift auth strategy will not work. Either set that or explicitly indicate another auth strategy you want via the --set auth.strategy=... option." 
}} + {{- end }} + {{- "openshift" }} + {{- else }} + {{- "token" }} + {{- end }} +{{- end }} +{{- end }} + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/cabundle.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/cabundle.yaml new file mode 100755 index 000000000..7462b95a7 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/cabundle.yaml @@ -0,0 +1,13 @@ +{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kiali-server.fullname" . }}-cabundle + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + annotations: + service.beta.openshift.io/inject-cabundle: "true" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/configmap.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/configmap.yaml new file mode 100755 index 000000000..b1bf53173 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/configmap.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +data: + config.yaml: | + {{- /* Most of .Values is simply the ConfigMap - strip out the keys that are not part of the ConfigMap */}} + {{- $cm := omit .Values "nameOverride" "fullnameOverride" "kiali_route_url" }} + {{- /* The helm chart defines namespace for us, but pass it to the ConfigMap in case the server needs it */}} + {{- $_ := set $cm.deployment "namespace" .Release.Namespace }} + {{- /* Some values of the ConfigMap are generated, but might not be identical, from .Values */}} + {{- $_ := set $cm "istio_namespace" (include "kiali-server.istio_namespace" .) }} + {{- $_ := set $cm.auth "strategy" (include "kiali-server.auth.strategy" .) }} + {{- $_ := set $cm.auth.openshift "client_id_prefix" (include "kiali-server.fullname" .) }} + {{- $_ := set $cm.identity "cert_file" (include "kiali-server.identity.cert_file" .) }} + {{- $_ := set $cm.identity "private_key_file" (include "kiali-server.identity.private_key_file" .) }} + {{- $_ := set $cm.login_token "signing_key" (include "kiali-server.login_token.signing_key" .) }} + {{- $_ := set $cm.server "web_root" (include "kiali-server.server.web_root" .) }} + {{- toYaml $cm | nindent 4 }} +... diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/envoy.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/envoy.yaml new file mode 100755 index 000000000..85b402017 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/envoy.yaml @@ -0,0 +1,56 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: envoy + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: Envoy Metrics + discoverOn: "envoy_server_uptime" + items: + - chart: + name: "Pods uptime" + spans: 4 + metricName: "envoy_server_uptime" + dataType: "raw" + - chart: + name: "Allocated memory" + unit: "bytes" + spans: 4 + metricName: "envoy_server_memory_allocated" + dataType: "raw" + min: 0 + - chart: + name: "Heap size" + unit: "bytes" + spans: 4 + metricName: "envoy_server_memory_heap_size" + dataType: "raw" + min: 0 + - chart: + name: "Upstream active connections" + spans: 6 + metricName: "envoy_cluster_upstream_cx_active" + dataType: "raw" + - chart: + name: "Upstream total requests" + spans: 6 + metricName: "envoy_cluster_upstream_rq_total" + unit: "rps" + dataType: "rate" + - chart: + name: "Downstream active connections" + spans: 6 + metricName: "envoy_listener_downstream_cx_active" + dataType: "raw" + - chart: + name: "Downstream HTTP requests" + spans: 6 + metricName: "envoy_listener_http_downstream_rq" + unit: "rps" + dataType: "rate" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/go.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/go.yaml new file mode 100755 index 000000000..2d2f42a93 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/go.yaml @@ -0,0 +1,67 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: go + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: Go Metrics + runtime: Go + discoverOn: "go_info" + items: + - chart: + name: "CPU ratio" + spans: 6 + metricName: "process_cpu_seconds_total" + dataType: "rate" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "RSS Memory" + unit: "bytes" + spans: 6 + metricName: "process_resident_memory_bytes" + dataType: "raw" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "Goroutines" + spans: 6 + metricName: "go_goroutines" + dataType: "raw" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "Heap allocation rate" + unit: "bytes/s" + spans: 6 + metricName: "go_memstats_alloc_bytes_total" + dataType: "rate" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "GC rate" + spans: 6 + metricName: "go_gc_duration_seconds_count" + dataType: "rate" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "Next GC" + unit: "bytes" + spans: 6 + metricName: "go_memstats_next_gc_bytes" + dataType: "raw" + aggregations: + - label: "pod_name" + displayName: "Pod" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/kiali.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/kiali.yaml new file mode 100755 index 000000000..b1f011b4f --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/kiali.yaml @@ -0,0 +1,44 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: kiali + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: Kiali Internal Metrics + items: + - chart: + name: "API processing duration" + unit: "seconds" + spans: 6 + metricName: "kiali_api_processing_duration_seconds" + dataType: "histogram" + aggregations: + - label: "route" + displayName: "Route" + - chart: + name: "Functions processing duration" + unit: "seconds" + spans: 6 + metricName: "kiali_go_function_processing_duration_seconds" + dataType: "histogram" + aggregations: + - label: "function" + displayName: "Function" + - label: "package" + displayName: "Package" + - chart: + name: "Failures" + spans: 12 + metricName: "kiali_go_function_failures_total" + dataType: "raw" + aggregations: + - label: "function" + displayName: "Function" + - label: "package" + displayName: "Package" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml new file mode 100755 index 000000000..2e1ed5cff --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml @@ -0,0 +1,43 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: micrometer-1.0.6-jvm-pool + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: JVM + title: JVM Pool Metrics + discoverOn: "jvm_buffer_total_capacity_bytes" + items: + - chart: + name: "Pool buffer memory used" + unit: "bytes" + spans: 4 + metricName: "jvm_buffer_memory_used_bytes" + dataType: "raw" + aggregations: + - label: "id" + displayName: "Pool" + - chart: + name: "Pool buffer capacity" + unit: "bytes" + spans: 4 + metricName: "jvm_buffer_total_capacity_bytes" + dataType: "raw" + aggregations: + - label: "id" + displayName: "Pool" + - chart: + name: "Pool buffer count" + unit: "bytes" + spans: 4 + metricName: "jvm_buffer_count" + dataType: "raw" + aggregations: + - label: "id" + displayName: "Pool" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm.yaml new file mode 100755 index 000000000..d64596882 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm.yaml @@ -0,0 +1,65 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: micrometer-1.0.6-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: JVM + title: JVM Metrics + discoverOn: "jvm_threads_live" + items: + - chart: + name: "Total live threads" + spans: 4 + metricName: "jvm_threads_live" + dataType: "raw" + - chart: + name: "Daemon threads" + spans: 4 + metricName: "jvm_threads_daemon" + dataType: "raw" + - chart: + name: "Loaded classes" + spans: 4 + metricName: "jvm_classes_loaded" + dataType: "raw" + + - chart: + name: "Memory used" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_used_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory commited" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_committed_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory max" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_max_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/micrometer-1.1-jvm.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/micrometer-1.1-jvm.yaml new file mode 100755 index 000000000..76e8d0a4a --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/micrometer-1.1-jvm.yaml @@ -0,0 +1,68 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: micrometer-1.1-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: JVM + title: JVM Metrics + discoverOn: "jvm_threads_live_threads" + items: + - chart: + name: "Memory used" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_used_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory commited" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_committed_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory max" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_max_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + + - chart: + name: "Total live threads" + spans: 4 + metricName: "jvm_threads_live_threads" + dataType: "raw" + - chart: + name: "Daemon threads" + spans: 4 + metricName: "jvm_threads_daemon_threads" + dataType: "raw" + - chart: + name: "Threads states" + spans: 4 + metricName: "jvm_threads_states_threads" + dataType: "raw" + aggregations: + - label: "state" + displayName: "State" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/microprofile-1.1.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/microprofile-1.1.yaml new file mode 100755 index 000000000..1d4951196 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/microprofile-1.1.yaml @@ -0,0 +1,59 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: microprofile-1.1 + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: MicroProfile Metrics + runtime: MicroProfile + discoverOn: "base:thread_count" + items: + - chart: + name: "Current loaded classes" + spans: 6 + metricName: "base:classloader_current_loaded_class_count" + dataType: "raw" + - chart: + name: "Unloaded classes" + spans: 6 + metricName: "base:classloader_total_unloaded_class_count" + dataType: "raw" + - chart: + name: "Thread count" + spans: 4 + metricName: "base:thread_count" + dataType: "raw" + - chart: + name: "Thread max count" + spans: 4 + metricName: "base:thread_max_count" + dataType: "raw" + - chart: + name: "Thread daemon count" + spans: 4 + metricName: "base:thread_daemon_count" + dataType: "raw" + - chart: + name: "Committed heap" + unit: "bytes" + spans: 4 + metricName: "base:memory_committed_heap_bytes" + dataType: "raw" + - chart: + name: "Max heap" + unit: "bytes" + spans: 4 + metricName: "base:memory_max_heap_bytes" + dataType: "raw" + - chart: + name: "Used heap" + unit: "bytes" + spans: 4 + metricName: "base:memory_used_heap_bytes" + dataType: "raw" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/microprofile-x.y.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/microprofile-x.y.yaml new file mode 100755 index 000000000..57ddc60ef --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/microprofile-x.y.yaml @@ -0,0 +1,38 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: microprofile-x.y + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: MicroProfile Metrics + runtime: MicroProfile + discoverOn: "base:gc_complete_scavenger_count" + items: + - chart: + name: "Young GC time" + unit: "seconds" + spans: 3 + metricName: "base:gc_young_generation_scavenger_time_seconds" + dataType: "raw" + - chart: + name: "Young GC count" + spans: 3 + metricName: "base:gc_young_generation_scavenger_count" + dataType: "raw" + - chart: + name: "Total GC time" + unit: "seconds" + spans: 3 + metricName: "base:gc_complete_scavenger_time_seconds" + dataType: "raw" + - chart: + name: "Total GC count" + spans: 3 + metricName: "base:gc_complete_scavenger_count" + dataType: "raw" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/nodejs.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/nodejs.yaml new file mode 100755 index 000000000..1ffe0aa10 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/nodejs.yaml @@ -0,0 +1,59 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: nodejs + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Node.js + title: Node.js Metrics + discoverOn: "nodejs_active_handles_total" + items: + - chart: + name: "Active handles" + spans: 4 + metricName: "nodejs_active_handles_total" + dataType: "raw" + - chart: + name: "Active requests" + spans: 4 + metricName: "nodejs_active_requests_total" + dataType: "raw" + - chart: + name: "Event loop lag" + unit: "seconds" + spans: 4 + metricName: "nodejs_eventloop_lag_seconds" + dataType: "raw" + - chart: + name: "Total heap size" + unit: "bytes" + spans: 12 + metricName: "nodejs_heap_space_size_total_bytes" + dataType: "raw" + aggregations: + - label: "space" + displayName: "Space" + - chart: + name: "Used heap size" + unit: "bytes" + spans: 6 + metricName: "nodejs_heap_space_size_used_bytes" + dataType: "raw" + aggregations: + - label: "space" + displayName: "Space" + - chart: + name: "Available heap size" + unit: "bytes" + spans: 6 + metricName: "nodejs_heap_space_size_available_bytes" + dataType: "raw" + aggregations: + - label: "space" + displayName: "Space" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/quarkus.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/quarkus.yaml new file mode 100755 index 000000000..cef5f3dce --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/quarkus.yaml @@ -0,0 +1,33 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: quarkus + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: Quarkus Metrics + runtime: Quarkus + items: + - chart: + name: "Thread count" + spans: 4 + metricName: "vendor:thread_count" + dataType: "raw" + - chart: + name: "Used heap" + unit: "bytes" + spans: 4 + metricName: "vendor:memory_heap_usage_bytes" + dataType: "raw" + - chart: + name: "Used non-heap" + unit: "bytes" + spans: 4 + metricName: "vendor:memory_non_heap_usage_bytes" + dataType: "raw" + - include: "microprofile-x.y" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/springboot-jvm-pool.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/springboot-jvm-pool.yaml new file mode 100755 index 000000000..42d87d890 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/springboot-jvm-pool.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: springboot-jvm-pool + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Spring Boot + title: JVM Pool Metrics + items: + - include: "micrometer-1.0.6-jvm-pool" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/springboot-jvm.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/springboot-jvm.yaml new file mode 100755 index 000000000..ced3acdd9 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/springboot-jvm.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: springboot-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Spring Boot + title: JVM Metrics + items: + - include: "micrometer-1.0.6-jvm" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/springboot-tomcat.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/springboot-tomcat.yaml new file mode 100755 index 000000000..c07016aa2 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/springboot-tomcat.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: springboot-tomcat + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Spring Boot + title: Tomcat Metrics + items: + - include: "tomcat" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/thorntail.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/thorntail.yaml new file mode 100755 index 000000000..6bd85e6f5 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/thorntail.yaml @@ -0,0 +1,22 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: thorntail + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Thorntail + title: Thorntail Metrics + discoverOn: "vendor:loaded_modules" + items: + - include: "microprofile-1.1" + - chart: + name: "Loaded modules" + spans: 6 + metricName: "vendor:loaded_modules" + dataType: "raw" +... 
+{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/tomcat.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/tomcat.yaml new file mode 100755 index 000000000..9a803342f --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/tomcat.yaml @@ -0,0 +1,67 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: tomcat + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Tomcat + title: Tomcat Metrics + discoverOn: "tomcat_sessions_created_total" + items: + - chart: + name: "Sessions created" + spans: 4 + metricName: "tomcat_sessions_created_total" + dataType: "raw" + - chart: + name: "Active sessions" + spans: 4 + metricName: "tomcat_sessions_active_current" + dataType: "raw" + - chart: + name: "Sessions rejected" + spans: 4 + metricName: "tomcat_sessions_rejected_total" + dataType: "raw" + + - chart: + name: "Bytes sent" + unit: "bitrate" + spans: 6 + metricName: "tomcat_global_sent_bytes_total" + dataType: "rate" + aggregations: + - label: "name" + displayName: "Name" + - chart: + name: "Bytes received" + unit: "bitrate" + spans: 6 + metricName: "tomcat_global_received_bytes_total" + dataType: "rate" + aggregations: + - label: "name" + displayName: "Name" + + - chart: + name: "Global errors" + spans: 6 + metricName: "tomcat_global_error_total" + dataType: "raw" + aggregations: + - label: "name" + displayName: "Name" + - chart: + name: "Servlet errors" + spans: 6 + metricName: "tomcat_servlet_error_total" + dataType: "raw" + aggregations: + - label: "name" + displayName: "Name" +... 
+{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-client.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-client.yaml new file mode 100755 index 000000000..2d591d6b0 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-client.yaml @@ -0,0 +1,60 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-client + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Client Metrics + discoverOn: "vertx_http_client_connections" + items: + - chart: + name: "Client response time" + unit: "seconds" + spans: 6 + metricName: "vertx_http_client_responseTime_seconds" + dataType: "histogram" + aggregations: + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Client request count rate" + unit: "ops" + spans: 6 + metricName: "vertx_http_client_requestCount_total" + dataType: "rate" + aggregations: + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Client active connections" + spans: 6 + metricName: "vertx_http_client_connections" + dataType: "raw" + - chart: + name: "Client active websockets" + spans: 6 + metricName: "vertx_http_client_wsConnections" + dataType: "raw" + - chart: + name: "Client bytes sent" + unit: "bytes" + spans: 6 + metricName: "vertx_http_client_bytesSent" + dataType: "histogram" + - chart: + name: "Client bytes received" + unit: "bytes" + spans: 6 + metricName: "vertx_http_client_bytesReceived" + dataType: "histogram" +... 
+{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-eventbus.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-eventbus.yaml new file mode 100755 index 000000000..65f9ee2ec --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-eventbus.yaml @@ -0,0 +1,59 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-eventbus + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Eventbus Metrics + discoverOn: "vertx_eventbus_handlers" + items: + - chart: + name: "Event bus handlers" + spans: 6 + metricName: "vertx_eventbus_handlers" + dataType: "raw" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus pending messages" + spans: 6 + metricName: "vertx_eventbus_pending" + dataType: "raw" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus processing time" + unit: "seconds" + spans: 6 + metricName: "vertx_eventbus_processingTime_seconds" + dataType: "histogram" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus bytes read" + unit: "bytes" + spans: 6 + metricName: "vertx_eventbus_bytesRead" + dataType: "histogram" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus bytes written" + unit: "bytes" + spans: 6 + metricName: "vertx_eventbus_bytesWritten" + dataType: "histogram" + aggregations: + - label: "address" + displayName: "Eventbus address" +... 
+{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-jvm.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-jvm.yaml new file mode 100755 index 000000000..2663186f3 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-jvm.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Vert.x + title: JVM Metrics + items: + - include: "micrometer-1.1-jvm" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-pool.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-pool.yaml new file mode 100755 index 000000000..f6af921b3 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-pool.yaml @@ -0,0 +1,68 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-pool + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Pools Metrics + discoverOn: "vertx_pool_ratio" + items: + - chart: + name: "Usage duration" + unit: "seconds" + spans: 6 + metricName: "vertx_pool_usage_seconds" + dataType: "histogram" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Usage ratio" + spans: 6 + metricName: "vertx_pool_ratio" + dataType: "raw" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Queue size" + spans: 6 + metricName: "vertx_pool_queue_size" + dataType: "raw" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Time in queue" + unit: "seconds" + spans: 6 + metricName: "vertx_pool_queue_delay_seconds" + dataType: "histogram" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Resources used" + spans: 6 + metricName: "vertx_pool_inUse" + dataType: "raw" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-server.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-server.yaml new file mode 100755 index 000000000..de6b89df9 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/dashboards/vertx-server.yaml @@ -0,0 +1,62 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-server + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Server Metrics + discoverOn: "vertx_http_server_connections" + items: + - chart: + name: "Server response time" + unit: "seconds" + spans: 6 + metricName: "vertx_http_server_responseTime_seconds" + dataType: "histogram" + aggregations: + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Server request count rate" + unit: "ops" + spans: 6 + metricName: "vertx_http_server_requestCount_total" + dataType: "rate" + aggregations: + - label: "code" + displayName: "Error code" + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Server active connections" + spans: 6 + metricName: "vertx_http_server_connections" + dataType: "raw" + - chart: + name: "Server active websockets" + spans: 6 + metricName: "vertx_http_server_wsConnections" + dataType: "raw" + - chart: + name: "Server bytes sent" + unit: "bytes" + spans: 6 + metricName: "vertx_http_server_bytesSent" + dataType: "histogram" + - chart: + name: "Server bytes received" + unit: "bytes" + spans: 6 + metricName: "vertx_http_server_bytesReceived" + dataType: "histogram" +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/deployment.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/deployment.yaml new file mode 100755 index 000000000..100c57922 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/deployment.yaml @@ -0,0 +1,174 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.deployment.replicas }} + selector: + matchLabels: + {{- include "kiali-server.selectorLabels" . 
| nindent 6 }} + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + name: {{ include "kiali-server.fullname" . }} + labels: + {{- include "kiali-server.labels" . | nindent 8 }} + {{- if .Values.deployment.pod_labels }} + {{- toYaml .Values.deployment.pod_labels | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.server.metrics_enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ .Values.server.metrics_port | quote }} + {{- else }} + prometheus.io/scrape: "false" + prometheus.io/port: "" + {{- end }} + kiali.io/runtimes: go,kiali + {{- if .Values.deployment.pod_annotations }} + {{- toYaml .Values.deployment.pod_annotations | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "kiali-server.fullname" . }} + {{- if .Values.deployment.priority_class_name }} + priorityClassName: {{ .Values.deployment.priority_class_name | quote }} + {{- end }} + {{- if .Values.deployment.image_pull_secrets }} + imagePullSecrets: + {{- range .Values.deployment.image_pull_secrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - image: "{{ template "system_default_registry" . }}{{ .Values.deployment.repository }}:{{ .Values.deployment.tag }}" + imagePullPolicy: {{ .Values.deployment.image_pull_policy | default "Always" }} + name: {{ include "kiali-server.fullname" . }} + command: + - "/opt/kiali/kiali" + - "-config" + - "/kiali-configuration/config.yaml" + ports: + - name: api-port + containerPort: {{ .Values.server.port | default 20001 }} + {{- if .Values.server.metrics_enabled }} + - name: http-metrics + containerPort: {{ .Values.server.metrics_port | default 9090 }} + {{- end }} + readinessProbe: + httpGet: + path: {{ include "kiali-server.server.web_root" . | trimSuffix "/" }}/healthz + port: api-port + {{- if (include "kiali-server.identity.cert_file" .) 
}} + scheme: HTTPS + {{- else }} + scheme: HTTP + {{- end }} + initialDelaySeconds: 5 + periodSeconds: 30 + livenessProbe: + httpGet: + path: {{ include "kiali-server.server.web_root" . | trimSuffix "/" }}/healthz + port: api-port + {{- if (include "kiali-server.identity.cert_file" .) }} + scheme: HTTPS + {{- else }} + scheme: HTTP + {{- end }} + initialDelaySeconds: 5 + periodSeconds: 30 + env: + - name: ACTIVE_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "{{ include "kiali-server.logLevel" . }}" + - name: LOG_FORMAT + value: "{{ .Values.deployment.logger.log_format }}" + - name: LOG_TIME_FIELD_FORMAT + value: "{{ .Values.deployment.logger.time_field_format }}" + - name: LOG_SAMPLER_RATE + value: "{{ .Values.deployment.logger.sampler_rate }}" + volumeMounts: + {{- if .Values.web_root_override }} + - name: kiali-console + subPath: env.js + mountPath: /opt/kiali/console/env.js + {{- end }} + - name: {{ include "kiali-server.fullname" . }}-configuration + mountPath: "/kiali-configuration" + - name: {{ include "kiali-server.fullname" . }}-cert + mountPath: "/kiali-cert" + - name: {{ include "kiali-server.fullname" . }}-secret + mountPath: "/kiali-secret" + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + - name: {{ include "kiali-server.fullname" . }}-cabundle + mountPath: "/kiali-cabundle" + {{- end }} + {{- if .Values.deployment.resources }} + resources: + {{- toYaml .Values.deployment.resources | nindent 10 }} + {{- end }} + volumes: + {{- if .Values.web_root_override }} + - name: kiali-console + configMap: + name: kiali-console + items: + - key: env.js + path: env.js + {{- end }} + - name: {{ include "kiali-server.fullname" . }}-configuration + configMap: + name: {{ include "kiali-server.fullname" . }} + - name: {{ include "kiali-server.fullname" . }}-cert + secret: + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + secretName: {{ include "kiali-server.fullname" . 
}}-cert-secret + {{- else }} + secretName: istio.{{ include "kiali-server.fullname" . }}-service-account + {{- end }} + {{- if not (include "kiali-server.identity.cert_file" .) }} + optional: true + {{- end }} + - name: {{ include "kiali-server.fullname" . }}-secret + secret: + secretName: {{ .Values.deployment.secret_name }} + optional: true + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + - name: {{ include "kiali-server.fullname" . }}-cabundle + configMap: + name: {{ include "kiali-server.fullname" . }}-cabundle + {{- end }} + {{- if or (.Values.deployment.affinity.node) (or (.Values.deployment.affinity.pod) (.Values.deployment.affinity.pod_anti)) }} + affinity: + {{- if .Values.deployment.affinity.node }} + nodeAffinity: + {{- toYaml .Values.deployment.affinity.node | nindent 10 }} + {{- end }} + {{- if .Values.deployment.affinity.pod }} + podAffinity: + {{- toYaml .Values.deployment.affinity.pod | nindent 10 }} + {{- end }} + {{- if .Values.deployment.affinity.pod_anti }} + podAntiAffinity: + {{- toYaml .Values.deployment.affinity.pod_anti | nindent 10 }} + {{- end }} + {{- end }} + {{- if .Values.deployment.tolerations }} + tolerations: + {{- toYaml .Values.deployment.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.deployment.node_selector }} + nodeSelector: + {{- toYaml .Values.deployment.node_selector | nindent 8 }} + {{- end }} +... diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/hpa.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/hpa.yaml new file mode 100755 index 000000000..934c4c1e9 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/hpa.yaml @@ -0,0 +1,17 @@ +{{- if .Values.deployment.hpa.spec }} +--- +apiVersion: {{ .Values.deployment.hpa.api_version }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "kiali-server.fullname" . }} + {{- toYaml .Values.deployment.hpa.spec | nindent 2 }} +... +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/ingress.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/ingress.yaml new file mode 100755 index 000000000..e4c98db1b --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/ingress.yaml @@ -0,0 +1,40 @@ +{{- if not (.Capabilities.APIVersions.Has "route.openshift.io/v1") }} +{{- if .Values.deployment.ingress_enabled }} +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + annotations: + {{- if hasKey .Values.deployment.override_ingress_yaml.metadata "annotations" }} + {{- toYaml .Values.deployment.override_ingress_yaml.metadata.annotations | nindent 4 }} + {{- else }} + # For ingress-nginx versions older than 0.20.0 use secure-backends. + # (see: https://github.com/kubernetes/ingress-nginx/issues/3416#issuecomment-438247948) + # For ingress-nginx versions 0.20.0 and later use backend-protocol. + {{- if (include "kiali-server.identity.cert_file" .) }} + nginx.ingress.kubernetes.io/secure-backends: "true" + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + {{- else }} + nginx.ingress.kubernetes.io/secure-backends: "false" + nginx.ingress.kubernetes.io/backend-protocol: "HTTP" + {{- end }} + {{- end }} +spec: + {{- if hasKey .Values.deployment.override_ingress_yaml "spec" }} + {{- toYaml .Values.deployment.override_ingress_yaml.spec | nindent 2 }} + {{- else }} + rules: + - http: + paths: + - path: {{ include "kiali-server.server.web_root" . }} + backend: + serviceName: {{ include "kiali-server.fullname" . 
}} + servicePort: {{ .Values.server.port }} + {{- end }} +... +{{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/oauth.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/oauth.yaml new file mode 100755 index 000000000..a178bb85e --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/oauth.yaml @@ -0,0 +1,17 @@ +{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} +{{- if .Values.kiali_route_url }} +--- +apiVersion: oauth.openshift.io/v1 +kind: OAuthClient +metadata: + name: {{ include "kiali-server.fullname" . }}-{{ .Release.Namespace }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +redirectURIs: +- {{ .Values.kiali_route_url }} +grantMethod: auto +allowAnyScope: true +... +{{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/psp.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/psp.yaml new file mode 100755 index 000000000..f891892cc --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/psp.yaml @@ -0,0 +1,67 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "kiali-server.fullname" . }}-psp + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "kiali-server.fullname" . }}-psp +subjects: + - kind: ServiceAccount + name: kiali +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "kiali-server.fullname" . }}-psp + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: + - policy + resourceNames: + - {{ include "kiali-server.fullname" . 
}}-psp + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "kiali-server.fullname" . }}-psp + namespace: {{ .Release.Namespace }} +spec: + allowPrivilegeEscalation: false + forbiddenSysctls: + - '*' + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + runAsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI + - persistentVolumeClaim +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/role-controlplane.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/role-controlplane.yaml new file mode 100755 index 000000000..a22c76756 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/role-controlplane.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "kiali-server.fullname" . }}-controlplane + namespace: {{ include "kiali-server.istio_namespace" . }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +rules: +- apiGroups: [""] + resources: + - secrets + verbs: + - list +... diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/role-viewer.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/role-viewer.yaml new file mode 100755 index 000000000..9fdd9fd1d --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/role-viewer.yaml @@ -0,0 +1,97 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "kiali-server.fullname" . }}-viewer + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +rules: +- apiGroups: [""] + resources: + - configmaps + - endpoints + - namespaces + - nodes + - pods + - pods/log + - pods/proxy + - replicationcontrollers + - services + verbs: + - get + - list + - watch +- apiGroups: [""] + resources: + - pods/portforward + verbs: + - create + - post +- apiGroups: ["extensions", "apps"] + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - watch +- apiGroups: ["batch"] + resources: + - cronjobs + - jobs + verbs: + - get + - list + - watch +- apiGroups: + - networking.istio.io + - security.istio.io + resources: ["*"] + verbs: + - get + - list + - watch +- apiGroups: ["apps.openshift.io"] + resources: + - deploymentconfigs + verbs: + - get + - list + - watch +- apiGroups: ["project.openshift.io"] + resources: + - projects + verbs: + - get +- apiGroups: ["route.openshift.io"] + resources: + - routes + verbs: + - get +- apiGroups: ["monitoring.kiali.io"] + resources: + - monitoringdashboards + verbs: + - get + - list +- apiGroups: ["iter8.tools"] + resources: + - experiments + verbs: + - get + - list +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: + - create +... diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/role.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/role.yaml new file mode 100755 index 000000000..8444bc753 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/role.yaml @@ -0,0 +1,108 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "kiali-server.fullname" . }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +rules: +- apiGroups: [""] + resources: + - configmaps + - endpoints + - namespaces + - nodes + - pods + - pods/log + - pods/proxy + - replicationcontrollers + - services + verbs: + - get + - list + - patch + - watch +- apiGroups: [""] + resources: + - pods/portforward + verbs: + - create + - post +- apiGroups: ["extensions", "apps"] + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - patch + - watch +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - watch +- apiGroups: ["batch"] + resources: + - cronjobs + - jobs + verbs: + - get + - list + - patch + - watch +- apiGroups: + - networking.istio.io + - security.istio.io + resources: ["*"] + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: ["apps.openshift.io"] + resources: + - deploymentconfigs + verbs: + - get + - list + - patch + - watch +- apiGroups: ["project.openshift.io"] + resources: + - projects + verbs: + - get +- apiGroups: ["route.openshift.io"] + resources: + - routes + verbs: + - get +- apiGroups: ["monitoring.kiali.io"] + resources: + - monitoringdashboards + verbs: + - get + - list +- apiGroups: ["iter8.tools"] + resources: + - experiments + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: + - create +... diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/rolebinding-controlplane.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/rolebinding-controlplane.yaml new file mode 100755 index 000000000..5a0015836 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/rolebinding-controlplane.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "kiali-server.fullname" . 
}}-controlplane + namespace: {{ include "kiali-server.istio_namespace" . }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "kiali-server.fullname" . }}-controlplane +subjects: +- kind: ServiceAccount + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} +... diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/rolebinding.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/rolebinding.yaml new file mode 100755 index 000000000..1eaabd65f --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/rolebinding.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "kiali-server.fullname" . }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + {{- if .Values.deployment.view_only_mode }} + name: {{ include "kiali-server.fullname" . }}-viewer + {{- else }} + name: {{ include "kiali-server.fullname" . }} + {{- end }} +subjects: +- kind: ServiceAccount + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} +... diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/route.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/route.yaml new file mode 100755 index 000000000..27940dc96 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/route.yaml @@ -0,0 +1,30 @@ +{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} +{{- if .Values.deployment.ingress_enabled }} +# As of OpenShift 4.5, need to use --disable-openapi-validation when installing via Helm +--- +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: {{ include "kiali-server.fullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + {{- if hasKey .Values.deployment.override_ingress_yaml.metadata "annotations" }} + annotations: + {{- toYaml .Values.deployment.override_ingress_yaml.metadata.annotations | nindent 4 }} + {{- end }} +spec: + {{- if hasKey .Values.deployment.override_ingress_yaml "spec" }} + {{- toYaml .Values.deployment.override_ingress_yaml.spec | nindent 2 }} + {{- else }} + tls: + termination: reencrypt + insecureEdgeTerminationPolicy: Redirect + to: + kind: Service + targetPort: {{ .Values.server.port }} + name: {{ include "kiali-server.fullname" . }} + {{- end }} +... +{{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/service.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/service.yaml new file mode 100755 index 000000000..9ccf4f388 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/service.yaml @@ -0,0 +1,47 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + annotations: + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + service.beta.openshift.io/serving-cert-secret-name: {{ include "kiali-server.fullname" . 
}}-cert-secret + {{- end }} + kiali.io/api-spec: https://kiali.io/api + kiali.io/api-type: rest + {{- if and (not (empty .Values.server.web_fqdn)) (not (empty .Values.server.web_schema)) }} + {{- if empty .Values.server.web_port }} + kiali.io/external-url: {{ .Values.server.web_schema }}://{{ .Values.server.web_fqdn }}{{ default "" .Values.server.web_root }} + {{- else }} + kiali.io/external-url: {{ .Values.server.web_schema }}://{{ .Values.server.web_fqdn }}:{{ .Values.server.web_port }}{{(default "" .Values.server.web_root) }} + {{- end }} + {{- end }} + {{- if .Values.deployment.service_annotations }} + {{- toYaml .Values.deployment.service_annotations | nindent 4 }} + {{- end }} +spec: + {{- if .Values.deployment.service_type }} + type: {{ .Values.deployment.service_type }} + {{- end }} + ports: + {{- if (include "kiali-server.identity.cert_file" .) }} + - name: tcp + {{- else }} + - name: http + {{- end }} + protocol: TCP + port: {{ .Values.server.port }} + {{- if .Values.server.metrics_enabled }} + - name: http-metrics + protocol: TCP + port: {{ .Values.server.metrics_port }} + {{- end }} + selector: + {{- include "kiali-server.selectorLabels" . | nindent 4 }} + {{- if .Values.deployment.additional_service_yaml }} + {{- toYaml .Values.deployment.additional_service_yaml | nindent 2 }} + {{- end }} +... diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/serviceaccount.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/serviceaccount.yaml new file mode 100755 index 000000000..9151b6f6a --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/serviceaccount.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +... 
diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/validate-install-crd.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/validate-install-crd.yaml new file mode 100755 index 000000000..b42eeb266 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/validate-install-crd.yaml @@ -0,0 +1,14 @@ +#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}} +# {{- $found := dict -}} +# {{- set $found "monitoring.kiali.io/v1alpha1/MonitoringDashboard" false -}} +# {{- range .Capabilities.APIVersions -}} +# {{- if hasKey $found (toString .) -}} +# {{- set $found (toString .) true -}} +# {{- end -}} +# {{- end -}} +# {{- range $_, $exists := $found -}} +# {{- if (eq $exists false) -}} +# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}} +# {{- end -}} +# {{- end -}} +#{{- end -}} \ No newline at end of file diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/web-root-configmap.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/web-root-configmap.yaml new file mode 100755 index 000000000..970d4e4f5 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/templates/web-root-configmap.yaml @@ -0,0 +1,12 @@ +{{- if .Values.web_root_override }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: kiali-console + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +data: + env.js: | + window.WEB_ROOT='/k8s/clusters/{{ .Values.global.cattle.clusterId }}/api/v1/namespaces/{{ .Release.Namespace }}/services/http:kiali:20001/proxy/kiali'; +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/values.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/values.yaml new file mode 100755 index 000000000..aada4e09a --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/kiali/values.yaml @@ -0,0 +1,93 @@ +nameOverride: "kiali" +fullnameOverride: "kiali" + +# This is required for "openshift" auth strategy. +# You have to know ahead of time what your Route URL will be because +# right now the helm chart can't figure this out at runtime (it would +# need to wait for the Kiali Route to be deployed and for OpenShift +# to start it up). If someone knows how to update this helm chart to +# do this, a PR would be welcome. +kiali_route_url: "" + +# rancher specific override that allows proxy access to kiali url +web_root_override: true + +# +# Settings that mimic the Kiali CR which are placed in the ConfigMap. +# Note that only those values used by the Helm Chart will be here. +# + +istio_namespace: "" # default is where Kiali is installed + +auth: + openid: {} + openshift: {} + strategy: "" + +deployment: + # This only limits what Kiali will attempt to see, but Kiali Service Account has permissions to see everything. 
+ # For more control over what the Kiali Service Account can see, use the Kiali Operator + accessible_namespaces: + - "**" + additional_service_yaml: {} + affinity: + node: {} + pod: {} + pod_anti: {} + custom_dashboards: + excludes: [''] + includes: ['*'] + hpa: + api_version: "autoscaling/v2beta2" + spec: {} + repository: rancher/mirrored-kiali-kiali + image_pull_policy: "Always" + image_pull_secrets: [] + tag: v1.32.0 + ingress_enabled: true + logger: + log_format: "text" + log_level: "info" + time_field_format: "2006-01-02T15:04:05Z07:00" + sampler_rate: "1" + node_selector: {} + override_ingress_yaml: + metadata: {} + pod_annotations: {} + pod_labels: {} + priority_class_name: "" + replicas: 1 + resources: {} + secret_name: "kiali" + service_annotations: {} + service_type: "" + tolerations: [] + version_label: v1.32.0 + view_only_mode: false + +external_services: + custom_dashboards: + enabled: true + +identity: {} + #cert_file: + #private_key_file: + +login_token: + signing_key: "" + +server: + port: 20001 + metrics_enabled: true + metrics_port: 9090 + web_root: "" + +# Common settings used among istio subcharts. +global: + # Specify rancher clusterId of external tracing config + # https://github.com/istio/istio.io/issues/4146#issuecomment-493543032 + cattle: + systemDefaultRegistry: "" + clusterId: + rbac: + pspEnabled: false diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/.helmignore b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/.helmignore new file mode 100755 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/Chart.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/Chart.yaml new file mode 100755 index 000000000..6e368616d --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/Chart.yaml @@ -0,0 +1,12 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: istio-system + catalog.rancher.io/release-name: rancher-tracing +apiVersion: v1 +appVersion: 1.20.0 +description: A quick start Jaeger Tracing installation using the all-in-one demo. + This is not production qualified. Refer to https://www.jaegertracing.io/ for details. +name: tracing +version: 1.20.1 diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/README.md b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/README.md new file mode 100755 index 000000000..25534c628 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/README.md @@ -0,0 +1,5 @@ +# Jaeger + +A Rancher chart based on the Jaeger all-in-one quick installation option. This chart will allow you to trace and monitor distributed microservices. + +> **Note:** The basic all-in-one Jaeger installation which is not qualified for production. Use the [Jaeger Tracing](https://www.jaegertracing.io) documentation to determine which installation you will need for your production needs. 
diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/_affinity.tpl b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/_affinity.tpl new file mode 100755 index 000000000..bf6a9aee5 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/_affinity.tpl @@ -0,0 +1,92 @@ +{{/* affinity - https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ */}} +{{- define "nodeAffinity" }} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + {{- include "nodeAffinityRequiredDuringScheduling" . }} + preferredDuringSchedulingIgnoredDuringExecution: + {{- include "nodeAffinityPreferredDuringScheduling" . }} +{{- end }} + +{{- define "nodeAffinityRequiredDuringScheduling" }} + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + {{- range $key, $val := .Values.global.arch }} + {{- if gt ($val | int) 0 }} + - {{ $key | quote }} + {{- end }} + {{- end }} + {{- $nodeSelector := default .Values.global.defaultNodeSelector .Values.nodeSelector -}} + {{- range $key, $val := $nodeSelector }} + - key: {{ $key }} + operator: In + values: + - {{ $val | quote }} + {{- end }} +{{- end }} + +{{- define "nodeAffinityPreferredDuringScheduling" }} + {{- range $key, $val := .Values.global.arch }} + {{- if gt ($val | int) 0 }} + - weight: {{ $val | int }} + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - {{ $key | quote }} + {{- end }} + {{- end }} +{{- end }} + +{{- define "podAntiAffinity" }} +{{- if or .Values.podAntiAffinityLabelSelector .Values.podAntiAffinityTermLabelSelector}} + podAntiAffinity: + {{- if .Values.podAntiAffinityLabelSelector }} + requiredDuringSchedulingIgnoredDuringExecution: + {{- include "podAntiAffinityRequiredDuringScheduling" . 
}} + {{- end }} + {{- if or .Values.podAntiAffinityTermLabelSelector}} + preferredDuringSchedulingIgnoredDuringExecution: + {{- include "podAntiAffinityPreferredDuringScheduling" . }} + {{- end }} +{{- end }} +{{- end }} + +{{- define "podAntiAffinityRequiredDuringScheduling" }} + {{- range $index, $item := .Values.podAntiAffinityLabelSelector }} + - labelSelector: + matchExpressions: + - key: {{ $item.key }} + operator: {{ $item.operator }} + {{- if $item.values }} + values: + {{- $vals := split "," $item.values }} + {{- range $i, $v := $vals }} + - {{ $v | quote }} + {{- end }} + {{- end }} + topologyKey: {{ $item.topologyKey }} + {{- end }} +{{- end }} + +{{- define "podAntiAffinityPreferredDuringScheduling" }} + {{- range $index, $item := .Values.podAntiAffinityTermLabelSelector }} + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: {{ $item.key }} + operator: {{ $item.operator }} + {{- if $item.values }} + values: + {{- $vals := split "," $item.values }} + {{- range $i, $v := $vals }} + - {{ $v | quote }} + {{- end }} + {{- end }} + topologyKey: {{ $item.topologyKey }} + weight: 100 + {{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/_helpers.tpl b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/_helpers.tpl new file mode 100755 index 000000000..56cfa7335 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "tracing.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "tracing.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/deployment.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/deployment.yaml new file mode 100755 index 000000000..25bb67fd3 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/deployment.yaml @@ -0,0 +1,86 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "tracing.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + app: {{ .Values.provider }} + template: + metadata: + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + sidecar.istio.io/inject: "false" + prometheus.io/scrape: "true" + prometheus.io/port: "14269" +{{- if .Values.jaeger.podAnnotations }} +{{ toYaml .Values.jaeger.podAnnotations | indent 8 }} +{{- end }} + spec: + containers: + - name: jaeger + image: "{{ template "system_default_registry" . 
}}{{ .Values.jaeger.repository }}:{{ .Values.jaeger.tag }}" + imagePullPolicy: {{ .Values.global.imagePullPolicy }} + env: + {{- if eq .Values.jaeger.spanStorageType "badger" }} + - name: BADGER_EPHEMERAL + value: "false" + - name: SPAN_STORAGE_TYPE + value: "badger" + - name: BADGER_DIRECTORY_VALUE + value: "/badger/data" + - name: BADGER_DIRECTORY_KEY + value: "/badger/key" + {{- end }} + - name: COLLECTOR_ZIPKIN_HTTP_PORT + value: "9411" + - name: MEMORY_MAX_TRACES + value: "{{ .Values.jaeger.memory.max_traces }}" + - name: QUERY_BASE_PATH + value: {{ if .Values.contextPath }} {{ .Values.contextPath }} {{ else }} /{{ .Values.provider }} {{ end }} + livenessProbe: + httpGet: + path: / + port: 14269 + readinessProbe: + httpGet: + path: / + port: 14269 +{{- if eq .Values.jaeger.spanStorageType "badger" }} + volumeMounts: + - name: data + mountPath: /badger +{{- end }} + resources: +{{- if .Values.jaeger.resources }} +{{ toYaml .Values.jaeger.resources | indent 12 }} +{{- else }} +{{ toYaml .Values.global.defaultResources | indent 12 }} +{{- end }} + affinity: + {{- include "nodeAffinity" . | indent 6 }} + {{- include "podAntiAffinity" . | indent 6 }} + {{- if .Values.global.rbac.pspEnabled }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + serviceAccountName: {{ include "tracing.fullname" . 
}} + {{- end }} +{{- if eq .Values.jaeger.spanStorageType "badger" }} + volumes: + - name: data +{{- if .Values.jaeger.persistentVolumeClaim.enabled }} + persistentVolumeClaim: + claimName: istio-jaeger-pvc +{{- else }} + emptyDir: {} +{{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/psp.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/psp.yaml new file mode 100755 index 000000000..44b230492 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/psp.yaml @@ -0,0 +1,86 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "tracing.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "tracing.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "tracing.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "tracing.fullname" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "tracing.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: +- apiGroups: + - policy + resourceNames: + - {{ include "tracing.fullname" . }} + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "tracing.fullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + forbiddenSysctls: + - '*' + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + runAsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - emptyDir + - secret + - persistentVolumeClaim +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/pvc.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/pvc.yaml new file mode 100755 index 000000000..9b4c55e4f --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/pvc.yaml @@ -0,0 +1,16 @@ +{{- if .Values.jaeger.persistentVolumeClaim.enabled }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: istio-jaeger-pvc + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} +spec: + storageClassName: {{ .Values.jaeger.storageClassName }} + accessModes: + - {{ .Values.jaeger.accessMode }} + resources: + requests: + storage: {{.Values.jaeger.persistentVolumeClaim.storage }} +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/service.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/service.yaml new file mode 100755 index 000000000..4210a9b5f --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/templates/service.yaml @@ -0,0 +1,63 @@ +apiVersion: v1 +kind: Service +metadata: + name: tracing + namespace: {{ .Release.Namespace }} + annotations: + {{- range $key, $val := .Values.service.annotations }} + {{ $key }}: {{ $val | quote }} + {{- end }} + labels: + app: {{ .Values.provider 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + type: {{ .Values.service.type }} + ports: + - name: {{ .Values.service.name }} + port: {{ .Values.service.externalPort }} + protocol: TCP + targetPort: 16686 + selector: + app: {{ .Values.provider }} +--- +# Jaeger implements the Zipkin API. To support swapping out the tracing backend, we use a Service named Zipkin. +apiVersion: v1 +kind: Service +metadata: + name: zipkin + namespace: {{ .Release.Namespace }} + labels: + name: zipkin + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + ports: + - name: {{ .Values.service.name }} + port: {{ .Values.zipkin.queryPort }} + targetPort: {{ .Values.zipkin.queryPort }} + selector: + app: {{ .Values.provider }} +--- +apiVersion: v1 +kind: Service +metadata: + name: jaeger-collector + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + type: ClusterIP + ports: + - name: jaeger-collector-http + port: 14268 + targetPort: 14268 + protocol: TCP + - name: jaeger-collector-grpc + port: 14250 + targetPort: 14250 + protocol: TCP + selector: + app: {{ .Values.provider }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/values.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/values.yaml new file mode 100755 index 000000000..18ff81c3c --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/charts/tracing/values.yaml @@ -0,0 +1,44 @@ +provider: jaeger +contextPath: "" +nodeSelector: {} +podAntiAffinityLabelSelector: [] +podAntiAffinityTermLabelSelector: [] +nameOverride: "" +fullnameOverride: "" + +global: + cattle: + systemDefaultRegistry: "" + defaultResources: {} + imagePullPolicy: IfNotPresent + imagePullSecrets: [] + arch: + amd64: 2 + s390x: 2 + ppc64le: 2 + defaultNodeSelector: {} + rbac: + pspEnabled: false + +jaeger: + repository: rancher/mirrored-jaegertracing-all-in-one + 
tag: 1.20.0 + # spanStorageType value can be "memory" and "badger" for all-in-one image + spanStorageType: badger + resources: + requests: + cpu: 10m + persistentVolumeClaim: + enabled: false + storage: 5Gi + storageClassName: "" + accessMode: ReadWriteMany + memory: + max_traces: 50000 +zipkin: + queryPort: 9411 +service: + annotations: {} + name: http-query + type: ClusterIP + externalPort: 16686 diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/configs/istio-base.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/configs/istio-base.yaml new file mode 100755 index 000000000..7ff972e2d --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/configs/istio-base.yaml @@ -0,0 +1,89 @@ +apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +spec: + addonComponents: + istiocoredns: + enabled: {{ .Values.istiocoredns.enabled }} + components: + base: + enabled: {{ .Values.base.enabled }} + cni: + enabled: {{ .Values.cni.enabled }} + egressGateways: + - enabled: {{ .Values.egressGateways.enabled }} + name: istio-egressgateway + ingressGateways: + - enabled: {{ .Values.ingressGateways.enabled }} + name: istio-ingressgateway + k8s: + service: + ports: + - name: status-port + port: 15021 + targetPort: 15021 + - name: http2 + port: 80 + targetPort: 8080 + nodePort: 31380 + - name: https + port: 443 + targetPort: 8443 + nodePort: 31390 + - name: tcp + port: 31400 + targetPort: 31400 + nodePort: 31400 + - name: tls + port: 15443 + targetPort: 15443 + istiodRemote: + enabled: {{ .Values.istiodRemote.enabled }} + pilot: + enabled: {{ .Values.pilot.enabled }} + hub: {{ .Values.systemDefaultRegistry | default "docker.io" }} + profile: default + tag: {{ .Values.tag }} + revision: {{ .Values.revision }} + meshConfig: + defaultConfig: + proxyMetadata: + {{- if .Values.dns.enabled }} + ISTIO_META_DNS_CAPTURE: "true" + {{- end }} + values: + gateways: + istio-egressgateway: + name: istio-egressgateway + type: {{ .Values.egressGateways.type }} + 
istio-ingressgateway: + name: istio-ingressgateway + type: {{ .Values.ingressGateways.type }} + global: + istioNamespace: {{ template "istio.namespace" . }} + proxy: + image: {{ template "system_default_registry" . }}{{ .Values.global.proxy.repository }}:{{ .Values.global.proxy.tag }} + proxy_init: + image: {{ template "system_default_registry" . }}{{ .Values.global.proxy_init.repository }}:{{ .Values.global.proxy_init.tag }} + {{- if .Values.global.defaultPodDisruptionBudget.enabled }} + defaultPodDisruptionBudget: + enabled: {{ .Values.global.defaultPodDisruptionBudget.enabled }} + {{- end }} + istiocoredns: + coreDNSImage: {{ template "system_default_registry" . }}{{ .Values.istiocoredns.image.repository }} + coreDNSPluginImage: {{ template "system_default_registry" . }}{{ .Values.istiocoredns.pluginImage.repository }}:{{ .Values.istiocoredns.pluginImage.tag }} + coreDNSTag: {{ .Values.istiocoredns.image.tag }} + {{- if .Values.pilot.enabled }} + pilot: + image: {{ template "system_default_registry" . }}{{ .Values.pilot.repository }}:{{ .Values.pilot.tag }} + {{- end }} + telemetry: + enabled: {{ .Values.telemetry.enabled }} + v2: + enabled: {{ .Values.telemetry.v2.enabled }} + {{- if .Values.cni.enabled }} + cni: + image: {{ template "system_default_registry" . 
}}{{ .Values.cni.repository }}:{{ .Values.cni.tag }} + excludeNamespaces: + {{- toYaml .Values.cni.excludeNamespaces | nindent 8 }} + logLevel: {{ .Values.cni.logLevel }} + {{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/requirements.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/requirements.yaml new file mode 100755 index 000000000..b60745780 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/requirements.yaml @@ -0,0 +1,17 @@ +dependencies: +- name: kiali + version: "" + repository: file://./charts/kiali + condition: kiali.enabled + tags: [] + enabled: false + importvalues: [] + alias: "" +- name: tracing + version: "" + repository: file://./charts/tracing + condition: tracing.enabled + tags: [] + enabled: false + importvalues: [] + alias: "" diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/samples/overlay-example.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/samples/overlay-example.yaml new file mode 100755 index 000000000..5cf3cf3b0 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/samples/overlay-example.yaml @@ -0,0 +1,37 @@ +apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +spec: + components: + ingressGateways: + - enabled: true + name: ilb-gateway + namespace: user-ingressgateway-ns + k8s: + resources: + requests: + cpu: 200m + service: + ports: + - name: tcp-citadel-grpc-tls + port: 8060 + targetPort: 8060 + - name: tcp-dns + port: 5353 + serviceAnnotations: + cloud.google.com/load-balancer-type: internal + - enabled: true + name: other-gateway + namespace: cattle-istio-system + k8s: + resources: + requests: + cpu: 200m + service: + ports: + - name: tcp-citadel-grpc-tls + port: 8060 + targetPort: 8060 + - name: tcp-dns + port: 5353 + serviceAnnotations: + cloud.google.com/load-balancer-type: internal diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/_helpers.tpl b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/_helpers.tpl new file 
mode 100755 index 000000000..3f7af953a --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/_helpers.tpl @@ -0,0 +1,12 @@ +{{/* Ensure namespace is set the same everywhere */}} +{{- define "istio.namespace" -}} + {{- .Release.Namespace | default "istio-system" -}} +{{- end -}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/admin-role.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/admin-role.yaml new file mode 100755 index 000000000..ad1313c4f --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/admin-role.yaml @@ -0,0 +1,43 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + name: istio-admin + namespace: {{ template "istio.namespace" . 
}} +rules: + - apiGroups: + - config.istio.io + resources: + - adapters + - attributemanifests + - handlers + - httpapispecbindings + - httpapispecs + - instances + - quotaspecbindings + - quotaspecs + - rules + - templates + verbs: ["get", "watch", "list"] + - apiGroups: + - networking.istio.io + resources: + - destinationrules + - envoyfilters + - gateways + - serviceentries + - sidecars + - virtualservices + - workloadentries + verbs: + - '*' + - apiGroups: + - security.istio.io + resources: + - authorizationpolicies + - peerauthentications + - requestauthentications + verbs: + - '*' diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/base-config-map.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/base-config-map.yaml new file mode 100755 index 000000000..5323917bc --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/base-config-map.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-installer-base + namespace: {{ template "istio.namespace" . }} +data: +{{ tpl (.Files.Glob "configs/*").AsConfig . 
| indent 2 }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/clusterrole.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/clusterrole.yaml new file mode 100755 index 000000000..a93b3df95 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/clusterrole.yaml @@ -0,0 +1,120 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-installer +rules: +# istio groups +- apiGroups: + - authentication.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - config.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - install.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - networking.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - rbac.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - security.istio.io + resources: + - '*' + verbs: + - '*' +# k8s groups +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions.apiextensions.k8s.io + - customresourcedefinitions + verbs: + - '*' +- apiGroups: + - apps + - extensions + resources: + - daemonsets + - deployments + - deployments/finalizers + - ingresses + - replicasets + - statefulsets + verbs: + - '*' +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - '*' +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - roles + - rolebindings + verbs: + - '*' +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - events + - namespaces + - pods + - pods/exec + - persistentvolumeclaims + - secrets + - services + - serviceaccounts + verbs: + - '*' 
+- apiGroups: + - policy + resourceNames: + - istio-installer + resources: + - podsecuritypolicies + verbs: + - use diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/clusterrolebinding.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/clusterrolebinding.yaml new file mode 100755 index 000000000..9d74a0434 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: istio-installer +subjects: +- kind: ServiceAccount + name: istio-installer + namespace: {{ template "istio.namespace" . }} +roleRef: + kind: ClusterRole + name: istio-installer + apiGroup: rbac.authorization.k8s.io diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/edit-role.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/edit-role.yaml new file mode 100755 index 000000000..d1059d58d --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/edit-role.yaml @@ -0,0 +1,43 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + namespace: {{ template "istio.namespace" . 
}} + name: istio-edit +rules: + - apiGroups: + - config.istio.io + resources: + - adapters + - attributemanifests + - handlers + - httpapispecbindings + - httpapispecs + - instances + - quotaspecbindings + - quotaspecs + - rules + - templates + verbs: ["get", "watch", "list"] + - apiGroups: + - networking.istio.io + resources: + - destinationrules + - envoyfilters + - gateways + - serviceentries + - sidecars + - virtualservices + - workloadentries + verbs: + - '*' + - apiGroups: + - security.istio.io + resources: + - authorizationpolicies + - peerauthentications + - requestauthentications + verbs: + - '*' diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-cni-psp.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-cni-psp.yaml new file mode 100755 index 000000000..5b94c8503 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-cni-psp.yaml @@ -0,0 +1,51 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: psp-istio-cni + namespace: {{ template "istio.namespace" . }} +spec: + allowPrivilegeEscalation: true + fsGroup: + rule: RunAsAny + hostNetwork: true + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - secret + - configMap + - emptyDir + - hostPath +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: psp-istio-cni + namespace: {{ template "istio.namespace" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: psp-istio-cni +subjects: + - kind: ServiceAccount + name: istio-cni +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: psp-istio-cni + namespace: {{ template "istio.namespace" . 
}} +rules: +- apiGroups: + - policy + resourceNames: + - psp-istio-cni + resources: + - podsecuritypolicies + verbs: + - use +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-install-job.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-install-job.yaml new file mode 100755 index 000000000..9a13f5698 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-install-job.yaml @@ -0,0 +1,50 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: istioctl-installer + namespace: {{ template "istio.namespace" . }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +spec: + backoffLimit: 1 + template: + spec: + containers: + - name: istioctl-installer + image: {{ template "system_default_registry" . }}{{ .Values.installer.repository }}:{{ .Values.installer.tag }} + env: + - name: RELEASE_NAME + value: {{ .Release.Name }} + - name: ISTIO_NAMESPACE + value: {{ template "istio.namespace" . 
}} + - name: FORCE_INSTALL + value: {{ .Values.forceInstall | default "false" | quote }} + command: ["/bin/sh","-c"] + args: ["/usr/local/app/scripts/run.sh"] + volumeMounts: + - name: config-volume + mountPath: /app/istio-base.yaml + subPath: istio-base.yaml + {{- if .Values.overlayFile }} + - name: overlay-volume + mountPath: /app/overlay-config.yaml + subPath: overlay-config.yaml + {{- end }} + volumes: + - name: config-volume + configMap: + name: istio-installer-base + {{- if .Values.overlayFile }} + - name: overlay-volume + configMap: + name: istio-installer-overlay + {{- end }} + serviceAccountName: istio-installer + {{- if .Values.global.rbac.pspEnabled }} + securityContext: + runAsUser: 101 + runAsGroup: 101 + {{- end }} + restartPolicy: Never diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-install-psp.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-install-psp.yaml new file mode 100755 index 000000000..f0b5ee565 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-install-psp.yaml @@ -0,0 +1,30 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: istio-installer + namespace: {{ template "istio.namespace" . 
}} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'configMap' + - 'secret' +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-psp.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-psp.yaml new file mode 100755 index 000000000..b3758b74f --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-psp.yaml @@ -0,0 +1,81 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: istio-psp + namespace: {{ template "istio.namespace" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: istio-psp +subjects: + - kind: ServiceAccount + name: istio-egressgateway-service-account + - kind: ServiceAccount + name: istio-ingressgateway-service-account + - kind: ServiceAccount + name: istio-mixer-service-account + - kind: ServiceAccount + name: istio-operator-authproxy + - kind: ServiceAccount + name: istiod-service-account + - kind: ServiceAccount + name: istio-sidecar-injector-service-account + - kind: ServiceAccount + name: istiocoredns-service-account + - kind: ServiceAccount + name: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: istio-psp + namespace: {{ template "istio.namespace" . }} +rules: +- apiGroups: + - policy + resourceNames: + - istio-psp + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: istio-psp + namespace: {{ template "istio.namespace" . 
}} +spec: + allowPrivilegeEscalation: false + forbiddenSysctls: + - '*' + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + runAsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI + - persistentVolumeClaim +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-uninstall-job.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-uninstall-job.yaml new file mode 100755 index 000000000..a7f156325 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/istio-uninstall-job.yaml @@ -0,0 +1,45 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: istioctl-uninstaller + namespace: {{ template "istio.namespace" . }} + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + template: + spec: + containers: + - name: istioctl-uninstaller + image: {{ template "system_default_registry" . }}{{ .Values.installer.repository }}:{{ .Values.installer.tag }} + env: + - name: RELEASE_NAME + value: {{ .Release.Name }} + - name: ISTIO_NAMESPACE + value: {{ template "istio.namespace" . 
}} + command: ["/bin/sh","-c"] + args: ["/usr/local/app/scripts/uninstall_istio_system.sh"] + volumeMounts: + - name: config-volume + mountPath: /app/istio-base.yaml + subPath: istio-base.yaml + {{- if .Values.overlayFile }} + - name: overlay-volume + mountPath: /app/overlay-config.yaml + subPath: overlay-config.yaml + {{ end }} + volumes: + - name: config-volume + configMap: + name: istio-installer-base + {{- if .Values.overlayFile }} + - name: overlay-volume + configMap: + name: istio-installer-overlay + {{ end }} + serviceAccountName: istio-installer + securityContext: + runAsUser: 101 + runAsGroup: 101 + restartPolicy: OnFailure diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/overlay-config-map.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/overlay-config-map.yaml new file mode 100755 index 000000000..287d26b2c --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/overlay-config-map.yaml @@ -0,0 +1,9 @@ +{{- if .Values.overlayFile }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-installer-overlay + namespace: {{ template "istio.namespace" . }} +data: + overlay-config.yaml: {{ toYaml .Values.overlayFile | indent 2 }} +{{- end }} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/service-monitors.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/service-monitors.yaml new file mode 100755 index 000000000..c3d60c4fc --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/service-monitors.yaml @@ -0,0 +1,51 @@ +{{- if .Values.kiali.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: envoy-stats-monitor + namespace: {{ template "istio.namespace" . 
}} + labels: + monitoring: istio-proxies +spec: + selector: + matchExpressions: + - {key: istio-prometheus-ignore, operator: DoesNotExist} + namespaceSelector: + any: true + jobLabel: envoy-stats + endpoints: + - path: /stats/prometheus + targetPort: 15090 + interval: 15s + relabelings: + - sourceLabels: [__meta_kubernetes_pod_container_port_name] + action: keep + regex: '.*-envoy-prom' + - action: labeldrop + regex: "__meta_kubernetes_pod_label_(.+)" + - sourceLabels: [__meta_kubernetes_namespace] + action: replace + targetLabel: namespace + - sourceLabels: [__meta_kubernetes_pod_name] + action: replace + targetLabel: pod_name +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: istio-component-monitor + namespace: {{ template "istio.namespace" . }} + labels: + monitoring: istio-components +spec: + jobLabel: istio + targetLabels: [app] + selector: + matchExpressions: + - {key: istio, operator: In, values: [pilot]} + namespaceSelector: + any: true + endpoints: + - port: http-monitoring + interval: 15s +{{- end -}} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/serviceaccount.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/serviceaccount.yaml new file mode 100755 index 000000000..82b6cbb7e --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-installer + namespace: {{ template "istio.namespace" . 
}} diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/view-role.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/view-role.yaml new file mode 100755 index 000000000..5947d3eba --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/templates/view-role.yaml @@ -0,0 +1,41 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + namespace: {{ template "istio.namespace" . }} + name: istio-view +rules: + - apiGroups: + - config.istio.io + resources: + - adapters + - attributemanifests + - handlers + - httpapispecbindings + - httpapispecs + - instances + - quotaspecbindings + - quotaspecs + - rules + - templates + verbs: ["get", "watch", "list"] + - apiGroups: + - networking.istio.io + resources: + - destinationrules + - envoyfilters + - gateways + - serviceentries + - sidecars + - virtualservices + - workloadentries + verbs: ["get", "watch", "list"] + - apiGroups: + - security.istio.io + resources: + - authorizationpolicies + - peerauthentications + - requestauthentications + verbs: ["get", "watch", "list"] diff --git a/charts/rancher-istio-1.8/rancher-istio/1.8.600/values.yaml b/charts/rancher-istio-1.8/rancher-istio/1.8.600/values.yaml new file mode 100755 index 000000000..cae665177 --- /dev/null +++ b/charts/rancher-istio-1.8/rancher-istio/1.8.600/values.yaml @@ -0,0 +1,95 @@ +overlayFile: "" +tag: 1.8.6 +##Setting forceInstall: true will remove the check for istio version < 1.6.x and will not analyze your install cluster prior to install +forceInstall: false + +installer: + repository: rancher/istio-installer + tag: 1.8.6-rancher1 + +##Deprecated as of 1.8 and unsupported in 1.9, native support provided by enabling `dns.enabled=true` +istiocoredns: + enabled: false + image: + repository: rancher/mirrored-coredns-coredns + tag: 1.6.2 + pluginImage: + repository: rancher/mirrored-istio-coredns-plugin + tag: 0.2-istio-1.1 + +##Native 
support for dns added in 1.8 +dns: + enabled: false + +base: + enabled: true + +cni: + enabled: false + repository: rancher/mirrored-istio-install-cni + tag: 1.8.6 + logLevel: info + excludeNamespaces: + - istio-system + - kube-system + +egressGateways: + enabled: false + type: NodePort + +ingressGateways: + enabled: true + type: NodePort + +istiodRemote: + enabled: false + +pilot: + enabled: true + repository: rancher/mirrored-istio-pilot + tag: 1.8.6 + +telemetry: + enabled: true + v2: + enabled: true + +global: + cattle: + systemDefaultRegistry: "" + proxy: + repository: rancher/mirrored-istio-proxyv2 + tag: 1.8.6 + proxy_init: + repository: rancher/mirrored-istio-proxyv2 + tag: 1.8.6 + defaultPodDisruptionBudget: + enabled: true + rbac: + pspEnabled: false + +# Kiali subchart from rancher-kiali-server +kiali: + enabled: true + auth: + strategy: anonymous + deployment: + ingress_enabled: false + repository: rancher/mirrored-kiali-kiali + tag: v1.32.0 + external_services: + prometheus: + custom_metrics_url: "http://rancher-monitoring-prometheus.cattle-monitoring-system.svc:9090" + url: "http://rancher-monitoring-prometheus.cattle-monitoring-system.svc:9090" + tracing: + in_cluster_url: "http://tracing.istio-system.svc:16686/jaeger" + grafana: + in_cluster_url: "http://rancher-monitoring-grafana.cattle-monitoring-system.svc:80" + url: "http://rancher-monitoring-grafana.cattle-monitoring-system.svc:80" + +tracing: + enabled: false + contextPath: "/jaeger" + jaeger: + repository: rancher/mirrored-jaegertracing-all-in-one + tag: 1.20.0 diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/Chart.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/Chart.yaml new file mode 100755 index 000000000..cfd5826eb --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/Chart.yaml @@ -0,0 +1,21 @@ +annotations: + catalog.cattle.io/auto-install: rancher-kiali-server-crd=1.32.100 + catalog.cattle.io/certified: rancher + catalog.cattle.io/display-name: Istio + 
catalog.cattle.io/namespace: istio-system + catalog.cattle.io/os: linux + catalog.cattle.io/provides-gvr: networking.istio.io.virtualservice/v1beta1 + catalog.cattle.io/release-name: rancher-istio + catalog.cattle.io/requests-cpu: 710m + catalog.cattle.io/requests-memory: 2314Mi + catalog.cattle.io/ui-component: istio +apiVersion: v1 +appVersion: 1.9.3 +description: A basic Istio setup that installs with the istioctl. Refer to https://istio.io/latest/ + for details. +icon: https://charts.rancher.io/assets/logos/istio.svg +keywords: +- networking +- infrastructure +name: rancher-istio +version: 1.9.300 diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/README.md b/charts/rancher-istio-1.9/rancher-istio/1.9.300/README.md new file mode 100755 index 000000000..199e45312 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/README.md @@ -0,0 +1,69 @@ +# Rancher Istio Installers + +A Rancher created chart that packages the istioctl binary to install via a helm chart. + +# Installation Requirements + +## Chart Dependencies +- rancher-kiali-server-crd chart + +# Uninstallation Requirements +To ensure rancher-istio uninstalls correctly, you must uninstall rancher-istio prior to uninstalling chart dependencies (see installation requirements for chart dependencies). This is because all definitions need to be available in order to properly build the rancher-istio objects for removal. + +If you remove dependent CRD charts prior to removing rancher-istio, you may encounter the following error:: + +`Error: uninstallation completed with 1 error(s): unable to build kubernetes objects for delete: unable to recognize "": no matches for kind "MonitoringDashboard" in version "monitoring.kiali.io/v1alpha1"` + +# Addons + +## Kiali + +Kiali allows you to view and manage your istio-based service mesh through an easy to use dashboard. 
+ +#### Dependencies +- rancher-monitoring chart or other Prometheus installation + +This dependency installs the required CRDs for installing Kiali. Since Kiali is bundled in with Istio in this chart, if you do not have these dependencies installed, your Istio installation will fail. If you do not plan on using Kiali, set `kiali.enabled=false` when installing Istio for a successful installation. + +> **Note:** The following configuration options assume you have installed the dependencies for Kiali. Please ensure you have Prometheus in your cluster before proceeding. + +The Monitoring app sets `prometheus.prometheusSpec.ignoreNamespaceSelectors=false` which means all namespaces will be scraped by Prometheus by default. This ensures you can view traffic, metrics and graphs for resources deployed in other namespaces. + +To limit scraping to specific namespaces, set `prometheus.prometheusSpec.ignoreNamespaceSelectors=true` and add one of the following configurations to ensure you can continue to view traffic, metrics and graphs for your deployed resources. + +1. Add a Service Monitor or Pod Monitor in the namespace with the targets you want to scrape. +1. Add an additionalScrapeConfig to your rancher-monitoring instance to scrape all targets in all namespaces. + +#### External Services + +##### Prometheus +The `kiali.external_services.prometheus` url is set in the values.yaml: +``` +http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc:{{ prometheus.service.port }} +``` +The url depends on the default values for `nameOverride`, `namespaceOverride`, and `prometheus.service.port` being set in your rancher-monitoring or other monitoring instance. 
+ +##### Grafana +The `kiali.external_services.grafana` url is set in the values.yaml: +``` +http://{{ .Values.nameOverride }}-grafana.{{ .Values.namespaceOverride }}.svc:{{ grafana.service.port }} +``` +The url depends on the default values for `nameOverride`, `namespaceOverride`, and `grafana.service.port` being set in your rancher-monitoring or other monitoring instance. + +##### Tracing +The `kiali.external_services.tracing` url and `.Values.tracing.contextPath` is set in the rancher-istio values.yaml: +``` +http://tracing.{{ .Values.namespaceOverride }}.svc:{{ .Values.service.externalPort }}/{{ .Values.tracing.contextPath }} +``` +The url depends on the default values for `namespaceOverride`, and `.Values.service.externalPort` being set in your rancher-tracing or other tracing instance. + +## Jaeger + +Jaeger allows you to trace and monitor distributed microservices. + +> **Note:** This addon is using the all-in-one Jaeger installation which is not qualified for production. Use the [Jaeger Tracing](https://www.jaegertracing.io/docs/1.21/getting-started/) documentation to determine which installation you will need for your production needs. + +# Installation +``` +helm install rancher-istio . --create-namespace -n istio-system +``` \ No newline at end of file diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/app-readme.md b/charts/rancher-istio-1.9/rancher-istio/1.9.300/app-readme.md new file mode 100755 index 000000000..0e42df083 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/app-readme.md @@ -0,0 +1,45 @@ +# Rancher Istio + +Our [Istio](https://istio.io/) installer wraps the istioctl binary commands in a handy helm chart, including an overlay file option to allow complex customization. It also includes: +* **[Kiali](https://kiali.io/)**: Used for graphing traffic flow throughout the mesh +* **[Jaeger](https://www.jaegertracing.io/)**: A quick start, all-in-one installation used for tracing distributed systems. 
This is not production qualified, please refer to jaeger documentation to determine which installation you may need instead. + +### Dependencies + +**Rancher Monitoring or other Prometheus installation** + +The Prometheus CRDs are required for installing Kiali which is enabled by default. If you do not have Prometheus installed your Istio installation will fail. If you do not plan on using Kiali, set `kiali.enabled=false` to bypass this requirement. + +### Customization + +**Rancher Monitoring** + +The Rancher Monitoring app sets `prometheus.prometheusSpec.ignoreNamespaceSelectors=false` which means all namespaces will be scraped by Prometheus by default. This ensures you can view traffic, metrics and graphs for resources deployed in other namespaces. + +To limit scraping to specific namespaces, set `prometheus.prometheusSpec.ignoreNamespaceSelectors=true` and add one of the following configurations to ensure you can continue to view traffic, metrics and graphs for your deployed resources. + +1. Add a Service Monitor or Pod Monitor in the namespace with the targets you want to scrape. +1. Add an additionalScrapeConfig to your rancher-monitoring instance to scrape all targets in all namespaces. + +**Custom Prometheus Installation with Kiali** + +To use a custom Monitoring installation, set the `kiali.external_services.prometheus` url in the values.yaml. This url depends on the values for `nameOverride`, `namespaceOverride`, and `prometheus.service.port` in your rancher-monitoring or other monitoring instance: +``` +http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc:{{ prometheus.service.port }} +``` +**Custom Grafana Installation with Kiali** + +To use a custom Grafana installation, set the `kiali.external_services.grafana` url in the values.yaml. 
This url depends on the values for `nameOverride`, `namespaceOverride`, and `grafana.service.port` in your rancher-monitoring or other grafana instance: +``` +http://{{ .Values.nameOverride }}-grafana.{{ .Values.namespaceOverride }}.svc:{{ grafana.service.port }} +``` +**Custom Tracing Installation with Kiali** + +To use a custom Tracing installation, set the `kiali.external_services.tracing` url and update the `.Values.tracing.contextPath` in the rancher-istio values.yaml. + +This url depends on the values for `namespaceOverride`, and `.Values.service.externalPort` in your rancher-tracing or other tracing instance: +``` +http://tracing.{{ .Values.namespaceOverride }}.svc:{{ .Values.service.externalPort }}/{{ .Values.tracing.contextPath }} +``` + +For more information on how to use the feature, refer to our [docs](https://rancher.com/docs/rancher/v2.x/en/istio/v2.5/). diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/Chart.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/Chart.yaml new file mode 100755 index 000000000..9b6fdf385 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/Chart.yaml @@ -0,0 +1,31 @@ +annotations: + catalog.cattle.io/auto-install: rancher-kiali-server-crd=match + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.cattle.io/provides-gvr: monitoringdashboards.monitoring.kiali.io/v1alpha1 + catalog.cattle.io/requires-gvr: monitoring.coreos.com.prometheus/v1 + catalog.rancher.io/namespace: cattle-istio-system + catalog.rancher.io/release-name: rancher-kiali-server +apiVersion: v2 +appVersion: v1.32.0 +description: Kiali is an open source project for service mesh observability, refer + to https://www.kiali.io for details. This is installed as sub-chart with customized + values in Rancher's Istio. 
+home: https://github.com/kiali/kiali +icon: https://raw.githubusercontent.com/kiali/kiali.io/master/themes/kiali/static/img/kiali_logo_masthead.png +keywords: +- istio +- kiali +- networking +- infrastructure +maintainers: +- email: kiali-users@googlegroups.com + name: Kiali + url: https://kiali.io +name: kiali +sources: +- https://github.com/kiali/kiali +- https://github.com/kiali/kiali-ui +- https://github.com/kiali/kiali-operator +- https://github.com/kiali/helm-charts +version: 1.32.1 diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/NOTES.txt b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/NOTES.txt new file mode 100755 index 000000000..751019401 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/NOTES.txt @@ -0,0 +1,5 @@ +Welcome to Kiali! For more details on Kiali, see: https://kiali.io + +The Kiali Server [{{ .Chart.AppVersion }}] has been installed in namespace [{{ .Release.Namespace }}]. It will be ready soon. + +(Helm: Chart=[{{ .Chart.Name }}], Release=[{{ .Release.Name }}], Version=[{{ .Chart.Version }}]) diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/_helpers.tpl b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/_helpers.tpl new file mode 100755 index 000000000..dd33bbe48 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/_helpers.tpl @@ -0,0 +1,192 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "kiali-server.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "kiali-server.fullname" -}} +{{- if .Values.fullnameOverride }} + {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} + {{- $name := default .Chart.Name .Values.nameOverride }} + {{- printf "%s" $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kiali-server.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Identifies the log_level with the old verbose_mode and the new log_level considered. +*/}} +{{- define "kiali-server.logLevel" -}} +{{- if .Values.deployment.verbose_mode -}} +{{- .Values.deployment.verbose_mode -}} +{{- else -}} +{{- .Values.deployment.logger.log_level -}} +{{- end -}} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "kiali-server.labels" -}} +helm.sh/chart: {{ include "kiali-server.chart" . }} +app: {{ include "kiali-server.name" . }} +{{ include "kiali-server.selectorLabels" . }} +version: {{ .Values.deployment.version_label | default .Chart.AppVersion | quote }} +app.kubernetes.io/version: {{ .Values.deployment.version_label | default .Chart.AppVersion | quote }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/part-of: "kiali" +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "kiali-server.selectorLabels" -}} +app.kubernetes.io/name: {{ include "kiali-server.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Used to determine if a custom dashboard (defined in .Template.Name) should be deployed. +*/}} +{{- define "kiali-server.isDashboardEnabled" -}} +{{- if .Values.external_services.custom_dashboards.enabled }} + {{- $includere := "" }} + {{- range $_, $s := .Values.deployment.custom_dashboards.includes }} + {{- if $s }} + {{- if $includere }} + {{- $includere = printf "%s|^%s$" $includere ($s | replace "*" ".*" | replace "?" 
".") }} + {{- else }} + {{- $includere = printf "^%s$" ($s | replace "*" ".*" | replace "?" ".") }} + {{- end }} + {{- end }} + {{- end }} + {{- $excludere := "" }} + {{- range $_, $s := .Values.deployment.custom_dashboards.excludes }} + {{- if $s }} + {{- if $excludere }} + {{- $excludere = printf "%s|^%s$" $excludere ($s | replace "*" ".*" | replace "?" ".") }} + {{- else }} + {{- $excludere = printf "^%s$" ($s | replace "*" ".*" | replace "?" ".") }} + {{- end }} + {{- end }} + {{- end }} + {{- if (and (mustRegexMatch (default "no-matches" $includere) (base .Template.Name)) (not (mustRegexMatch (default "no-matches" $excludere) (base .Template.Name)))) }} + {{- print "enabled" }} + {{- else }} + {{- print "" }} + {{- end }} +{{- else }} + {{- print "" }} +{{- end }} +{{- end }} + +{{/* +Determine the default login token signing key. +*/}} +{{- define "kiali-server.login_token.signing_key" -}} +{{- if .Values.login_token.signing_key }} + {{- .Values.login_token.signing_key }} +{{- else }} + {{- randAlphaNum 16 }} +{{- end }} +{{- end }} + +{{/* +Determine the default web root. +*/}} +{{- define "kiali-server.server.web_root" -}} +{{- if .Values.server.web_root }} + {{- .Values.server.web_root | trimSuffix "/" }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- "/" }} + {{- else }} + {{- "/kiali" }} + {{- end }} +{{- end }} +{{- end }} + +{{/* +Determine the default identity cert file. There is no default if on k8s; only on OpenShift. +*/}} +{{- define "kiali-server.identity.cert_file" -}} +{{- if hasKey .Values.identity "cert_file" }} + {{- .Values.identity.cert_file }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- "/kiali-cert/tls.crt" }} + {{- else }} + {{- "" }} + {{- end }} +{{- end }} +{{- end }} + +{{/* +Determine the default identity private key file. There is no default if on k8s; only on OpenShift. 
+*/}} +{{- define "kiali-server.identity.private_key_file" -}} +{{- if hasKey .Values.identity "private_key_file" }} + {{- .Values.identity.private_key_file }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- "/kiali-cert/tls.key" }} + {{- else }} + {{- "" }} + {{- end }} +{{- end }} +{{- end }} + +{{/* +Determine the istio namespace - default is where Kiali is installed. +*/}} +{{- define "kiali-server.istio_namespace" -}} +{{- if .Values.istio_namespace }} + {{- .Values.istio_namespace }} +{{- else }} + {{- .Release.Namespace }} +{{- end }} +{{- end }} + +{{/* +Determine the auth strategy to use - default is "token" on Kubernetes and "openshift" on OpenShift. +*/}} +{{- define "kiali-server.auth.strategy" -}} +{{- if .Values.auth.strategy }} + {{- if (and (eq .Values.auth.strategy "openshift") (not .Values.kiali_route_url)) }} + {{- fail "You did not define what the Kiali Route URL will be (--set kiali_route_url=...). Without this set, the openshift auth strategy will not work. Either set that or use a different auth strategy via the --set auth.strategy=... option." }} + {{- end }} + {{- .Values.auth.strategy }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- if not .Values.kiali_route_url }} + {{- fail "You did not define what the Kiali Route URL will be (--set kiali_route_url=...). Without this set, the openshift auth strategy will not work. Either set that or explicitly indicate another auth strategy you want via the --set auth.strategy=... option." 
}} + {{- end }} + {{- "openshift" }} + {{- else }} + {{- "token" }} + {{- end }} +{{- end }} +{{- end }} + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/cabundle.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/cabundle.yaml new file mode 100755 index 000000000..7462b95a7 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/cabundle.yaml @@ -0,0 +1,13 @@ +{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kiali-server.fullname" . }}-cabundle + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + annotations: + service.beta.openshift.io/inject-cabundle: "true" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/configmap.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/configmap.yaml new file mode 100755 index 000000000..b1bf53173 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/configmap.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +data: + config.yaml: | + {{- /* Most of .Values is simply the ConfigMap - strip out the keys that are not part of the ConfigMap */}} + {{- $cm := omit .Values "nameOverride" "fullnameOverride" "kiali_route_url" }} + {{- /* The helm chart defines namespace for us, but pass it to the ConfigMap in case the server needs it */}} + {{- $_ := set $cm.deployment "namespace" .Release.Namespace }} + {{- /* Some values of the ConfigMap are generated, but might not be identical, from .Values */}} + {{- $_ := set $cm "istio_namespace" (include "kiali-server.istio_namespace" .) }} + {{- $_ := set $cm.auth "strategy" (include "kiali-server.auth.strategy" .) }} + {{- $_ := set $cm.auth.openshift "client_id_prefix" (include "kiali-server.fullname" .) }} + {{- $_ := set $cm.identity "cert_file" (include "kiali-server.identity.cert_file" .) }} + {{- $_ := set $cm.identity "private_key_file" (include "kiali-server.identity.private_key_file" .) }} + {{- $_ := set $cm.login_token "signing_key" (include "kiali-server.login_token.signing_key" .) }} + {{- $_ := set $cm.server "web_root" (include "kiali-server.server.web_root" .) }} + {{- toYaml $cm | nindent 4 }} +... diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/envoy.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/envoy.yaml new file mode 100755 index 000000000..85b402017 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/envoy.yaml @@ -0,0 +1,56 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: envoy + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: Envoy Metrics + discoverOn: "envoy_server_uptime" + items: + - chart: + name: "Pods uptime" + spans: 4 + metricName: "envoy_server_uptime" + dataType: "raw" + - chart: + name: "Allocated memory" + unit: "bytes" + spans: 4 + metricName: "envoy_server_memory_allocated" + dataType: "raw" + min: 0 + - chart: + name: "Heap size" + unit: "bytes" + spans: 4 + metricName: "envoy_server_memory_heap_size" + dataType: "raw" + min: 0 + - chart: + name: "Upstream active connections" + spans: 6 + metricName: "envoy_cluster_upstream_cx_active" + dataType: "raw" + - chart: + name: "Upstream total requests" + spans: 6 + metricName: "envoy_cluster_upstream_rq_total" + unit: "rps" + dataType: "rate" + - chart: + name: "Downstream active connections" + spans: 6 + metricName: "envoy_listener_downstream_cx_active" + dataType: "raw" + - chart: + name: "Downstream HTTP requests" + spans: 6 + metricName: "envoy_listener_http_downstream_rq" + unit: "rps" + dataType: "rate" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/go.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/go.yaml new file mode 100755 index 000000000..2d2f42a93 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/go.yaml @@ -0,0 +1,67 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: go + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: Go Metrics + runtime: Go + discoverOn: "go_info" + items: + - chart: + name: "CPU ratio" + spans: 6 + metricName: "process_cpu_seconds_total" + dataType: "rate" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "RSS Memory" + unit: "bytes" + spans: 6 + metricName: "process_resident_memory_bytes" + dataType: "raw" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "Goroutines" + spans: 6 + metricName: "go_goroutines" + dataType: "raw" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "Heap allocation rate" + unit: "bytes/s" + spans: 6 + metricName: "go_memstats_alloc_bytes_total" + dataType: "rate" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "GC rate" + spans: 6 + metricName: "go_gc_duration_seconds_count" + dataType: "rate" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "Next GC" + unit: "bytes" + spans: 6 + metricName: "go_memstats_next_gc_bytes" + dataType: "raw" + aggregations: + - label: "pod_name" + displayName: "Pod" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/kiali.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/kiali.yaml new file mode 100755 index 000000000..b1f011b4f --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/kiali.yaml @@ -0,0 +1,44 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: kiali + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: Kiali Internal Metrics + items: + - chart: + name: "API processing duration" + unit: "seconds" + spans: 6 + metricName: "kiali_api_processing_duration_seconds" + dataType: "histogram" + aggregations: + - label: "route" + displayName: "Route" + - chart: + name: "Functions processing duration" + unit: "seconds" + spans: 6 + metricName: "kiali_go_function_processing_duration_seconds" + dataType: "histogram" + aggregations: + - label: "function" + displayName: "Function" + - label: "package" + displayName: "Package" + - chart: + name: "Failures" + spans: 12 + metricName: "kiali_go_function_failures_total" + dataType: "raw" + aggregations: + - label: "function" + displayName: "Function" + - label: "package" + displayName: "Package" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml new file mode 100755 index 000000000..2e1ed5cff --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml @@ -0,0 +1,43 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: micrometer-1.0.6-jvm-pool + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: JVM + title: JVM Pool Metrics + discoverOn: "jvm_buffer_total_capacity_bytes" + items: + - chart: + name: "Pool buffer memory used" + unit: "bytes" + spans: 4 + metricName: "jvm_buffer_memory_used_bytes" + dataType: "raw" + aggregations: + - label: "id" + displayName: "Pool" + - chart: + name: "Pool buffer capacity" + unit: "bytes" + spans: 4 + metricName: "jvm_buffer_total_capacity_bytes" + dataType: "raw" + aggregations: + - label: "id" + displayName: "Pool" + - chart: + name: "Pool buffer count" + unit: "bytes" + spans: 4 + metricName: "jvm_buffer_count" + dataType: "raw" + aggregations: + - label: "id" + displayName: "Pool" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm.yaml new file mode 100755 index 000000000..d64596882 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm.yaml @@ -0,0 +1,65 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: micrometer-1.0.6-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: JVM + title: JVM Metrics + discoverOn: "jvm_threads_live" + items: + - chart: + name: "Total live threads" + spans: 4 + metricName: "jvm_threads_live" + dataType: "raw" + - chart: + name: "Daemon threads" + spans: 4 + metricName: "jvm_threads_daemon" + dataType: "raw" + - chart: + name: "Loaded classes" + spans: 4 + metricName: "jvm_classes_loaded" + dataType: "raw" + + - chart: + name: "Memory used" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_used_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory commited" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_committed_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory max" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_max_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/micrometer-1.1-jvm.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/micrometer-1.1-jvm.yaml new file mode 100755 index 000000000..76e8d0a4a --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/micrometer-1.1-jvm.yaml @@ -0,0 +1,68 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: micrometer-1.1-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: JVM + title: JVM Metrics + discoverOn: "jvm_threads_live_threads" + items: + - chart: + name: "Memory used" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_used_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory commited" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_committed_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory max" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_max_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + + - chart: + name: "Total live threads" + spans: 4 + metricName: "jvm_threads_live_threads" + dataType: "raw" + - chart: + name: "Daemon threads" + spans: 4 + metricName: "jvm_threads_daemon_threads" + dataType: "raw" + - chart: + name: "Threads states" + spans: 4 + metricName: "jvm_threads_states_threads" + dataType: "raw" + aggregations: + - label: "state" + displayName: "State" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/microprofile-1.1.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/microprofile-1.1.yaml new file mode 100755 index 000000000..1d4951196 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/microprofile-1.1.yaml @@ -0,0 +1,59 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: microprofile-1.1 + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: MicroProfile Metrics + runtime: MicroProfile + discoverOn: "base:thread_count" + items: + - chart: + name: "Current loaded classes" + spans: 6 + metricName: "base:classloader_current_loaded_class_count" + dataType: "raw" + - chart: + name: "Unloaded classes" + spans: 6 + metricName: "base:classloader_total_unloaded_class_count" + dataType: "raw" + - chart: + name: "Thread count" + spans: 4 + metricName: "base:thread_count" + dataType: "raw" + - chart: + name: "Thread max count" + spans: 4 + metricName: "base:thread_max_count" + dataType: "raw" + - chart: + name: "Thread daemon count" + spans: 4 + metricName: "base:thread_daemon_count" + dataType: "raw" + - chart: + name: "Committed heap" + unit: "bytes" + spans: 4 + metricName: "base:memory_committed_heap_bytes" + dataType: "raw" + - chart: + name: "Max heap" + unit: "bytes" + spans: 4 + metricName: "base:memory_max_heap_bytes" + dataType: "raw" + - chart: + name: "Used heap" + unit: "bytes" + spans: 4 + metricName: "base:memory_used_heap_bytes" + dataType: "raw" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/microprofile-x.y.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/microprofile-x.y.yaml new file mode 100755 index 000000000..57ddc60ef --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/microprofile-x.y.yaml @@ -0,0 +1,38 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: microprofile-x.y + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: MicroProfile Metrics + runtime: MicroProfile + discoverOn: "base:gc_complete_scavenger_count" + items: + - chart: + name: "Young GC time" + unit: "seconds" + spans: 3 + metricName: "base:gc_young_generation_scavenger_time_seconds" + dataType: "raw" + - chart: + name: "Young GC count" + spans: 3 + metricName: "base:gc_young_generation_scavenger_count" + dataType: "raw" + - chart: + name: "Total GC time" + unit: "seconds" + spans: 3 + metricName: "base:gc_complete_scavenger_time_seconds" + dataType: "raw" + - chart: + name: "Total GC count" + spans: 3 + metricName: "base:gc_complete_scavenger_count" + dataType: "raw" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/nodejs.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/nodejs.yaml new file mode 100755 index 000000000..1ffe0aa10 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/nodejs.yaml @@ -0,0 +1,59 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: nodejs + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Node.js + title: Node.js Metrics + discoverOn: "nodejs_active_handles_total" + items: + - chart: + name: "Active handles" + spans: 4 + metricName: "nodejs_active_handles_total" + dataType: "raw" + - chart: + name: "Active requests" + spans: 4 + metricName: "nodejs_active_requests_total" + dataType: "raw" + - chart: + name: "Event loop lag" + unit: "seconds" + spans: 4 + metricName: "nodejs_eventloop_lag_seconds" + dataType: "raw" + - chart: + name: "Total heap size" + unit: "bytes" + spans: 12 + metricName: "nodejs_heap_space_size_total_bytes" + dataType: "raw" + aggregations: + - label: "space" + displayName: "Space" + - chart: + name: "Used heap size" + unit: "bytes" + spans: 6 + metricName: "nodejs_heap_space_size_used_bytes" + dataType: "raw" + aggregations: + - label: "space" + displayName: "Space" + - chart: + name: "Available heap size" + unit: "bytes" + spans: 6 + metricName: "nodejs_heap_space_size_available_bytes" + dataType: "raw" + aggregations: + - label: "space" + displayName: "Space" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/quarkus.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/quarkus.yaml new file mode 100755 index 000000000..cef5f3dce --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/quarkus.yaml @@ -0,0 +1,33 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: quarkus + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: Quarkus Metrics + runtime: Quarkus + items: + - chart: + name: "Thread count" + spans: 4 + metricName: "vendor:thread_count" + dataType: "raw" + - chart: + name: "Used heap" + unit: "bytes" + spans: 4 + metricName: "vendor:memory_heap_usage_bytes" + dataType: "raw" + - chart: + name: "Used non-heap" + unit: "bytes" + spans: 4 + metricName: "vendor:memory_non_heap_usage_bytes" + dataType: "raw" + - include: "microprofile-x.y" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/springboot-jvm-pool.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/springboot-jvm-pool.yaml new file mode 100755 index 000000000..42d87d890 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/springboot-jvm-pool.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: springboot-jvm-pool + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Spring Boot + title: JVM Pool Metrics + items: + - include: "micrometer-1.0.6-jvm-pool" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/springboot-jvm.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/springboot-jvm.yaml new file mode 100755 index 000000000..ced3acdd9 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/springboot-jvm.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: springboot-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Spring Boot + title: JVM Metrics + items: + - include: "micrometer-1.0.6-jvm" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/springboot-tomcat.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/springboot-tomcat.yaml new file mode 100755 index 000000000..c07016aa2 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/springboot-tomcat.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: springboot-tomcat + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Spring Boot + title: Tomcat Metrics + items: + - include: "tomcat" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/thorntail.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/thorntail.yaml new file mode 100755 index 000000000..6bd85e6f5 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/thorntail.yaml @@ -0,0 +1,22 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: thorntail + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Thorntail + title: Thorntail Metrics + discoverOn: "vendor:loaded_modules" + items: + - include: "microprofile-1.1" + - chart: + name: "Loaded modules" + spans: 6 + metricName: "vendor:loaded_modules" + dataType: "raw" +... 
+{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/tomcat.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/tomcat.yaml new file mode 100755 index 000000000..9a803342f --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/tomcat.yaml @@ -0,0 +1,67 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: tomcat + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Tomcat + title: Tomcat Metrics + discoverOn: "tomcat_sessions_created_total" + items: + - chart: + name: "Sessions created" + spans: 4 + metricName: "tomcat_sessions_created_total" + dataType: "raw" + - chart: + name: "Active sessions" + spans: 4 + metricName: "tomcat_sessions_active_current" + dataType: "raw" + - chart: + name: "Sessions rejected" + spans: 4 + metricName: "tomcat_sessions_rejected_total" + dataType: "raw" + + - chart: + name: "Bytes sent" + unit: "bitrate" + spans: 6 + metricName: "tomcat_global_sent_bytes_total" + dataType: "rate" + aggregations: + - label: "name" + displayName: "Name" + - chart: + name: "Bytes received" + unit: "bitrate" + spans: 6 + metricName: "tomcat_global_received_bytes_total" + dataType: "rate" + aggregations: + - label: "name" + displayName: "Name" + + - chart: + name: "Global errors" + spans: 6 + metricName: "tomcat_global_error_total" + dataType: "raw" + aggregations: + - label: "name" + displayName: "Name" + - chart: + name: "Servlet errors" + spans: 6 + metricName: "tomcat_servlet_error_total" + dataType: "raw" + aggregations: + - label: "name" + displayName: "Name" +... 
+{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-client.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-client.yaml new file mode 100755 index 000000000..2d591d6b0 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-client.yaml @@ -0,0 +1,60 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-client + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Client Metrics + discoverOn: "vertx_http_client_connections" + items: + - chart: + name: "Client response time" + unit: "seconds" + spans: 6 + metricName: "vertx_http_client_responseTime_seconds" + dataType: "histogram" + aggregations: + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Client request count rate" + unit: "ops" + spans: 6 + metricName: "vertx_http_client_requestCount_total" + dataType: "rate" + aggregations: + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Client active connections" + spans: 6 + metricName: "vertx_http_client_connections" + dataType: "raw" + - chart: + name: "Client active websockets" + spans: 6 + metricName: "vertx_http_client_wsConnections" + dataType: "raw" + - chart: + name: "Client bytes sent" + unit: "bytes" + spans: 6 + metricName: "vertx_http_client_bytesSent" + dataType: "histogram" + - chart: + name: "Client bytes received" + unit: "bytes" + spans: 6 + metricName: "vertx_http_client_bytesReceived" + dataType: "histogram" +... 
+{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-eventbus.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-eventbus.yaml new file mode 100755 index 000000000..65f9ee2ec --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-eventbus.yaml @@ -0,0 +1,59 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-eventbus + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Eventbus Metrics + discoverOn: "vertx_eventbus_handlers" + items: + - chart: + name: "Event bus handlers" + spans: 6 + metricName: "vertx_eventbus_handlers" + dataType: "raw" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus pending messages" + spans: 6 + metricName: "vertx_eventbus_pending" + dataType: "raw" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus processing time" + unit: "seconds" + spans: 6 + metricName: "vertx_eventbus_processingTime_seconds" + dataType: "histogram" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus bytes read" + unit: "bytes" + spans: 6 + metricName: "vertx_eventbus_bytesRead" + dataType: "histogram" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus bytes written" + unit: "bytes" + spans: 6 + metricName: "vertx_eventbus_bytesWritten" + dataType: "histogram" + aggregations: + - label: "address" + displayName: "Eventbus address" +... 
+{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-jvm.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-jvm.yaml new file mode 100755 index 000000000..2663186f3 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-jvm.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Vert.x + title: JVM Metrics + items: + - include: "micrometer-1.1-jvm" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-pool.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-pool.yaml new file mode 100755 index 000000000..f6af921b3 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-pool.yaml @@ -0,0 +1,68 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-pool + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Pools Metrics + discoverOn: "vertx_pool_ratio" + items: + - chart: + name: "Usage duration" + unit: "seconds" + spans: 6 + metricName: "vertx_pool_usage_seconds" + dataType: "histogram" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Usage ratio" + spans: 6 + metricName: "vertx_pool_ratio" + dataType: "raw" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Queue size" + spans: 6 + metricName: "vertx_pool_queue_size" + dataType: "raw" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Time in queue" + unit: "seconds" + spans: 6 + metricName: "vertx_pool_queue_delay_seconds" + dataType: "histogram" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Resources used" + spans: 6 + metricName: "vertx_pool_inUse" + dataType: "raw" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-server.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-server.yaml new file mode 100755 index 000000000..de6b89df9 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/dashboards/vertx-server.yaml @@ -0,0 +1,62 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-server + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Server Metrics + discoverOn: "vertx_http_server_connections" + items: + - chart: + name: "Server response time" + unit: "seconds" + spans: 6 + metricName: "vertx_http_server_responseTime_seconds" + dataType: "histogram" + aggregations: + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Server request count rate" + unit: "ops" + spans: 6 + metricName: "vertx_http_server_requestCount_total" + dataType: "rate" + aggregations: + - label: "code" + displayName: "Error code" + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Server active connections" + spans: 6 + metricName: "vertx_http_server_connections" + dataType: "raw" + - chart: + name: "Server active websockets" + spans: 6 + metricName: "vertx_http_server_wsConnections" + dataType: "raw" + - chart: + name: "Server bytes sent" + unit: "bytes" + spans: 6 + metricName: "vertx_http_server_bytesSent" + dataType: "histogram" + - chart: + name: "Server bytes received" + unit: "bytes" + spans: 6 + metricName: "vertx_http_server_bytesReceived" + dataType: "histogram" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/deployment.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/deployment.yaml new file mode 100755 index 000000000..100c57922 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/deployment.yaml @@ -0,0 +1,174 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.deployment.replicas }} + selector: + matchLabels: + {{- include "kiali-server.selectorLabels" . 
| nindent 6 }} + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + name: {{ include "kiali-server.fullname" . }} + labels: + {{- include "kiali-server.labels" . | nindent 8 }} + {{- if .Values.deployment.pod_labels }} + {{- toYaml .Values.deployment.pod_labels | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.server.metrics_enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ .Values.server.metrics_port | quote }} + {{- else }} + prometheus.io/scrape: "false" + prometheus.io/port: "" + {{- end }} + kiali.io/runtimes: go,kiali + {{- if .Values.deployment.pod_annotations }} + {{- toYaml .Values.deployment.pod_annotations | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "kiali-server.fullname" . }} + {{- if .Values.deployment.priority_class_name }} + priorityClassName: {{ .Values.deployment.priority_class_name | quote }} + {{- end }} + {{- if .Values.deployment.image_pull_secrets }} + imagePullSecrets: + {{- range .Values.deployment.image_pull_secrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - image: "{{ template "system_default_registry" . }}{{ .Values.deployment.repository }}:{{ .Values.deployment.tag }}" + imagePullPolicy: {{ .Values.deployment.image_pull_policy | default "Always" }} + name: {{ include "kiali-server.fullname" . }} + command: + - "/opt/kiali/kiali" + - "-config" + - "/kiali-configuration/config.yaml" + ports: + - name: api-port + containerPort: {{ .Values.server.port | default 20001 }} + {{- if .Values.server.metrics_enabled }} + - name: http-metrics + containerPort: {{ .Values.server.metrics_port | default 9090 }} + {{- end }} + readinessProbe: + httpGet: + path: {{ include "kiali-server.server.web_root" . | trimSuffix "/" }}/healthz + port: api-port + {{- if (include "kiali-server.identity.cert_file" .) 
}} + scheme: HTTPS + {{- else }} + scheme: HTTP + {{- end }} + initialDelaySeconds: 5 + periodSeconds: 30 + livenessProbe: + httpGet: + path: {{ include "kiali-server.server.web_root" . | trimSuffix "/" }}/healthz + port: api-port + {{- if (include "kiali-server.identity.cert_file" .) }} + scheme: HTTPS + {{- else }} + scheme: HTTP + {{- end }} + initialDelaySeconds: 5 + periodSeconds: 30 + env: + - name: ACTIVE_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "{{ include "kiali-server.logLevel" . }}" + - name: LOG_FORMAT + value: "{{ .Values.deployment.logger.log_format }}" + - name: LOG_TIME_FIELD_FORMAT + value: "{{ .Values.deployment.logger.time_field_format }}" + - name: LOG_SAMPLER_RATE + value: "{{ .Values.deployment.logger.sampler_rate }}" + volumeMounts: + {{- if .Values.web_root_override }} + - name: kiali-console + subPath: env.js + mountPath: /opt/kiali/console/env.js + {{- end }} + - name: {{ include "kiali-server.fullname" . }}-configuration + mountPath: "/kiali-configuration" + - name: {{ include "kiali-server.fullname" . }}-cert + mountPath: "/kiali-cert" + - name: {{ include "kiali-server.fullname" . }}-secret + mountPath: "/kiali-secret" + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + - name: {{ include "kiali-server.fullname" . }}-cabundle + mountPath: "/kiali-cabundle" + {{- end }} + {{- if .Values.deployment.resources }} + resources: + {{- toYaml .Values.deployment.resources | nindent 10 }} + {{- end }} + volumes: + {{- if .Values.web_root_override }} + - name: kiali-console + configMap: + name: kiali-console + items: + - key: env.js + path: env.js + {{- end }} + - name: {{ include "kiali-server.fullname" . }}-configuration + configMap: + name: {{ include "kiali-server.fullname" . }} + - name: {{ include "kiali-server.fullname" . }}-cert + secret: + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + secretName: {{ include "kiali-server.fullname" . 
}}-cert-secret + {{- else }} + secretName: istio.{{ include "kiali-server.fullname" . }}-service-account + {{- end }} + {{- if not (include "kiali-server.identity.cert_file" .) }} + optional: true + {{- end }} + - name: {{ include "kiali-server.fullname" . }}-secret + secret: + secretName: {{ .Values.deployment.secret_name }} + optional: true + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + - name: {{ include "kiali-server.fullname" . }}-cabundle + configMap: + name: {{ include "kiali-server.fullname" . }}-cabundle + {{- end }} + {{- if or (.Values.deployment.affinity.node) (or (.Values.deployment.pod) (.Values.deployment.pod_anti)) }} + affinity: + {{- if .Values.deployment.affinity.node }} + nodeAffinity: + {{- toYaml .Values.deployment.affinity.node | nindent 10 }} + {{- end }} + {{- if .Values.deployment.affinity.pod }} + podAffinity: + {{- toYaml .Values.deployment.affinity.pod | nindent 10 }} + {{- end }} + {{- if .Values.deployment.affinity.pod_anti }} + podAntiAffinity: + {{- toYaml .Values.deployment.affinity.pod_anti | nindent 10 }} + {{- end }} + {{- end }} + {{- if .Values.deployment.tolerations }} + tolerations: + {{- toYaml .Values.deployment.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.deployment.node_selector }} + nodeSelector: + {{- toYaml .Values.deployment.node_selector | nindent 8 }} + {{- end }} +... diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/hpa.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/hpa.yaml new file mode 100755 index 000000000..934c4c1e9 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/hpa.yaml @@ -0,0 +1,17 @@ +{{- if .Values.deployment.hpa.spec }} +--- +apiVersion: {{ .Values.deployment.hpa.api_version }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "kiali-server.fullname" . }} + {{- toYaml .Values.deployment.hpa.spec | nindent 2 }} +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/ingress.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/ingress.yaml new file mode 100755 index 000000000..e4c98db1b --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/ingress.yaml @@ -0,0 +1,40 @@ +{{- if not (.Capabilities.APIVersions.Has "route.openshift.io/v1") }} +{{- if .Values.deployment.ingress_enabled }} +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + annotations: + {{- if hasKey .Values.deployment.override_ingress_yaml.metadata "annotations" }} + {{- toYaml .Values.deployment.override_ingress_yaml.metadata.annotations | nindent 4 }} + {{- else }} + # For ingress-nginx versions older than 0.20.0 use secure-backends. + # (see: https://github.com/kubernetes/ingress-nginx/issues/3416#issuecomment-438247948) + # For ingress-nginx versions 0.20.0 and later use backend-protocol. + {{- if (include "kiali-server.identity.cert_file" .) }} + nginx.ingress.kubernetes.io/secure-backends: "true" + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + {{- else }} + nginx.ingress.kubernetes.io/secure-backends: "false" + nginx.ingress.kubernetes.io/backend-protocol: "HTTP" + {{- end }} + {{- end }} +spec: + {{- if hasKey .Values.deployment.override_ingress_yaml "spec" }} + {{- toYaml .Values.deployment.override_ingress_yaml.spec | nindent 2 }} + {{- else }} + rules: + - http: + paths: + - path: {{ include "kiali-server.server.web_root" . }} + backend: + serviceName: {{ include "kiali-server.fullname" . 
}} + servicePort: {{ .Values.server.port }} + {{- end }} +... +{{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/oauth.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/oauth.yaml new file mode 100755 index 000000000..a178bb85e --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/oauth.yaml @@ -0,0 +1,17 @@ +{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} +{{- if .Values.kiali_route_url }} +--- +apiVersion: oauth.openshift.io/v1 +kind: OAuthClient +metadata: + name: {{ include "kiali-server.fullname" . }}-{{ .Release.Namespace }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +redirectURIs: +- {{ .Values.kiali_route_url }} +grantMethod: auto +allowAnyScope: true +... +{{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/psp.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/psp.yaml new file mode 100755 index 000000000..f891892cc --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/psp.yaml @@ -0,0 +1,67 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "kiali-server.fullname" . }}-psp + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "kiali-server.fullname" . }}-psp +subjects: + - kind: ServiceAccount + name: kiali +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "kiali-server.fullname" . }}-psp + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: + - policy + resourceNames: + - {{ include "kiali-server.fullname" . 
}}-psp + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "kiali-server.fullname" . }}-psp + namespace: {{ .Release.Namespace }} +spec: + allowPrivilegeEscalation: false + forbiddenSysctls: + - '*' + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + runAsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI + - persistentVolumeClaim +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/role-controlplane.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/role-controlplane.yaml new file mode 100755 index 000000000..a22c76756 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/role-controlplane.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "kiali-server.fullname" . }}-controlplane + namespace: {{ include "kiali-server.istio_namespace" . }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +rules: +- apiGroups: [""] + resources: + - secrets + verbs: + - list +... diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/role-viewer.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/role-viewer.yaml new file mode 100755 index 000000000..9fdd9fd1d --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/role-viewer.yaml @@ -0,0 +1,97 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "kiali-server.fullname" . }}-viewer + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +rules: +- apiGroups: [""] + resources: + - configmaps + - endpoints + - namespaces + - nodes + - pods + - pods/log + - pods/proxy + - replicationcontrollers + - services + verbs: + - get + - list + - watch +- apiGroups: [""] + resources: + - pods/portforward + verbs: + - create + - post +- apiGroups: ["extensions", "apps"] + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - watch +- apiGroups: ["batch"] + resources: + - cronjobs + - jobs + verbs: + - get + - list + - watch +- apiGroups: + - networking.istio.io + - security.istio.io + resources: ["*"] + verbs: + - get + - list + - watch +- apiGroups: ["apps.openshift.io"] + resources: + - deploymentconfigs + verbs: + - get + - list + - watch +- apiGroups: ["project.openshift.io"] + resources: + - projects + verbs: + - get +- apiGroups: ["route.openshift.io"] + resources: + - routes + verbs: + - get +- apiGroups: ["monitoring.kiali.io"] + resources: + - monitoringdashboards + verbs: + - get + - list +- apiGroups: ["iter8.tools"] + resources: + - experiments + verbs: + - get + - list +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: + - create +... diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/role.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/role.yaml new file mode 100755 index 000000000..8444bc753 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/role.yaml @@ -0,0 +1,108 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "kiali-server.fullname" . }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +rules: +- apiGroups: [""] + resources: + - configmaps + - endpoints + - namespaces + - nodes + - pods + - pods/log + - pods/proxy + - replicationcontrollers + - services + verbs: + - get + - list + - patch + - watch +- apiGroups: [""] + resources: + - pods/portforward + verbs: + - create + - post +- apiGroups: ["extensions", "apps"] + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - patch + - watch +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - watch +- apiGroups: ["batch"] + resources: + - cronjobs + - jobs + verbs: + - get + - list + - patch + - watch +- apiGroups: + - networking.istio.io + - security.istio.io + resources: ["*"] + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: ["apps.openshift.io"] + resources: + - deploymentconfigs + verbs: + - get + - list + - patch + - watch +- apiGroups: ["project.openshift.io"] + resources: + - projects + verbs: + - get +- apiGroups: ["route.openshift.io"] + resources: + - routes + verbs: + - get +- apiGroups: ["monitoring.kiali.io"] + resources: + - monitoringdashboards + verbs: + - get + - list +- apiGroups: ["iter8.tools"] + resources: + - experiments + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: + - create +... diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/rolebinding-controlplane.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/rolebinding-controlplane.yaml new file mode 100755 index 000000000..5a0015836 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/rolebinding-controlplane.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "kiali-server.fullname" . 
}}-controlplane + namespace: {{ include "kiali-server.istio_namespace" . }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "kiali-server.fullname" . }}-controlplane +subjects: +- kind: ServiceAccount + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} +... diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/rolebinding.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/rolebinding.yaml new file mode 100755 index 000000000..1eaabd65f --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/rolebinding.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "kiali-server.fullname" . }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + {{- if .Values.deployment.view_only_mode }} + name: {{ include "kiali-server.fullname" . }}-viewer + {{- else }} + name: {{ include "kiali-server.fullname" . }} + {{- end }} +subjects: +- kind: ServiceAccount + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} +... diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/route.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/route.yaml new file mode 100755 index 000000000..27940dc96 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/route.yaml @@ -0,0 +1,30 @@ +{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} +{{- if .Values.deployment.ingress_enabled }} +# As of OpenShift 4.5, need to use --disable-openapi-validation when installing via Helm +--- +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: {{ include "kiali-server.fullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + {{- if hasKey .Values.deployment.override_ingress_yaml.metadata "annotations" }} + annotations: + {{- toYaml .Values.deployment.override_ingress_yaml.metadata.annotations | nindent 4 }} + {{- end }} +spec: + {{- if hasKey .Values.deployment.override_ingress_yaml "spec" }} + {{- toYaml .Values.deployment.override_ingress_yaml.spec | nindent 2 }} + {{- else }} + tls: + termination: reencrypt + insecureEdgeTerminationPolicy: Redirect + to: + kind: Service + targetPort: {{ .Values.server.port }} + name: {{ include "kiali-server.fullname" . }} + {{- end }} +... +{{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/service.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/service.yaml new file mode 100755 index 000000000..9ccf4f388 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/service.yaml @@ -0,0 +1,47 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + annotations: + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + service.beta.openshift.io/serving-cert-secret-name: {{ include "kiali-server.fullname" . 
}}-cert-secret + {{- end }} + kiali.io/api-spec: https://kiali.io/api + kiali.io/api-type: rest + {{- if and (not (empty .Values.server.web_fqdn)) (not (empty .Values.server.web_schema)) }} + {{- if empty .Values.server.web_port }} + kiali.io/external-url: {{ .Values.server.web_schema }}://{{ .Values.server.web_fqdn }}{{ default "" .Values.server.web_root }} + {{- else }} + kiali.io/external-url: {{ .Values.server.web_schema }}://{{ .Values.server.web_fqdn }}:{{ .Values.server.web_port }}{{(default "" .Values.server.web_root) }} + {{- end }} + {{- end }} + {{- if .Values.deployment.service_annotations }} + {{- toYaml .Values.deployment.service_annotations | nindent 4 }} + {{- end }} +spec: + {{- if .Values.deployment.service_type }} + type: {{ .Values.deployment.service_type }} + {{- end }} + ports: + {{- if (include "kiali-server.identity.cert_file" .) }} + - name: tcp + {{- else }} + - name: http + {{- end }} + protocol: TCP + port: {{ .Values.server.port }} + {{- if .Values.server.metrics_enabled }} + - name: http-metrics + protocol: TCP + port: {{ .Values.server.metrics_port }} + {{- end }} + selector: + {{- include "kiali-server.selectorLabels" . | nindent 4 }} + {{- if .Values.deployment.additional_service_yaml }} + {{- toYaml .Values.deployment.additional_service_yaml | nindent 2 }} + {{- end }} +... diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/serviceaccount.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/serviceaccount.yaml new file mode 100755 index 000000000..9151b6f6a --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/serviceaccount.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +... 
diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/validate-install-crd.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/validate-install-crd.yaml new file mode 100755 index 000000000..b42eeb266 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/validate-install-crd.yaml @@ -0,0 +1,14 @@ +#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}} +# {{- $found := dict -}} +# {{- set $found "monitoring.kiali.io/v1alpha1/MonitoringDashboard" false -}} +# {{- range .Capabilities.APIVersions -}} +# {{- if hasKey $found (toString .) -}} +# {{- set $found (toString .) true -}} +# {{- end -}} +# {{- end -}} +# {{- range $_, $exists := $found -}} +# {{- if (eq $exists false) -}} +# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}} +# {{- end -}} +# {{- end -}} +#{{- end -}} \ No newline at end of file diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/web-root-configmap.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/web-root-configmap.yaml new file mode 100755 index 000000000..970d4e4f5 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/templates/web-root-configmap.yaml @@ -0,0 +1,12 @@ +{{- if .Values.web_root_override }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: kiali-console + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +data: + env.js: | + window.WEB_ROOT='/k8s/clusters/{{ .Values.global.cattle.clusterId }}/api/v1/namespaces/{{ .Release.Namespace }}/services/http:kiali:20001/proxy/kiali'; +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/values.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/values.yaml new file mode 100755 index 000000000..aada4e09a --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/kiali/values.yaml @@ -0,0 +1,93 @@ +nameOverride: "kiali" +fullnameOverride: "kiali" + +# This is required for "openshift" auth strategy. +# You have to know ahead of time what your Route URL will be because +# right now the helm chart can't figure this out at runtime (it would +# need to wait for the Kiali Route to be deployed and for OpenShift +# to start it up). If someone knows how to update this helm chart to +# do this, a PR would be welcome. +kiali_route_url: "" + +# rancher specific override that allows proxy access to kiali url +web_root_override: true + +# +# Settings that mimic the Kiali CR which are placed in the ConfigMap. +# Note that only those values used by the Helm Chart will be here. +# + +istio_namespace: "" # default is where Kiali is installed + +auth: + openid: {} + openshift: {} + strategy: "" + +deployment: + # This only limits what Kiali will attempt to see, but Kiali Service Account has permissions to see everything. 
+ # For more control over what the Kial Service Account can see, use the Kiali Operator + accessible_namespaces: + - "**" + additional_service_yaml: {} + affinity: + node: {} + pod: {} + pod_anti: {} + custom_dashboards: + excludes: [''] + includes: ['*'] + hpa: + api_version: "autoscaling/v2beta2" + spec: {} + repository: rancher/mirrored-kiali-kiali + image_pull_policy: "Always" + image_pull_secrets: [] + tag: v1.32.0 + ingress_enabled: true + logger: + log_format: "text" + log_level: "info" + time_field_format: "2006-01-02T15:04:05Z07:00" + sampler_rate: "1" + node_selector: {} + override_ingress_yaml: + metadata: {} + pod_annotations: {} + pod_labels: {} + priority_class_name: "" + replicas: 1 + resources: {} + secret_name: "kiali" + service_annotations: {} + service_type: "" + tolerations: [] + version_label: v1.32.0 + view_only_mode: false + +external_services: + custom_dashboards: + enabled: true + +identity: {} + #cert_file: + #private_key_file: + +login_token: + signing_key: "" + +server: + port: 20001 + metrics_enabled: true + metrics_port: 9090 + web_root: "" + +# Common settings used among istio subcharts. +global: + # Specify rancher clusterId of external tracing config + # https://github.com/istio/istio.io/issues/4146#issuecomment-493543032 + cattle: + systemDefaultRegistry: "" + clusterId: + rbac: + pspEnabled: false diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/.helmignore b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/.helmignore new file mode 100755 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/Chart.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/Chart.yaml new file mode 100755 index 000000000..6e368616d --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/Chart.yaml @@ -0,0 +1,12 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: istio-system + catalog.rancher.io/release-name: rancher-tracing +apiVersion: v1 +appVersion: 1.20.0 +description: A quick start Jaeger Tracing installation using the all-in-one demo. + This is not production qualified. Refer to https://www.jaegertracing.io/ for details. +name: tracing +version: 1.20.1 diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/README.md b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/README.md new file mode 100755 index 000000000..25534c628 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/README.md @@ -0,0 +1,5 @@ +# Jaeger + +A Rancher chart based on the Jaeger all-in-one quick installation option. This chart will allow you to trace and monitor distributed microservices. + +> **Note:** The basic all-in-one Jaeger installation which is not qualified for production. Use the [Jaeger Tracing](https://www.jaegertracing.io) documentation to determine which installation you will need for your production needs. 
diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/_affinity.tpl b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/_affinity.tpl new file mode 100755 index 000000000..bf6a9aee5 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/_affinity.tpl @@ -0,0 +1,92 @@ +{{/* affinity - https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ */}} +{{- define "nodeAffinity" }} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + {{- include "nodeAffinityRequiredDuringScheduling" . }} + preferredDuringSchedulingIgnoredDuringExecution: + {{- include "nodeAffinityPreferredDuringScheduling" . }} +{{- end }} + +{{- define "nodeAffinityRequiredDuringScheduling" }} + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + {{- range $key, $val := .Values.global.arch }} + {{- if gt ($val | int) 0 }} + - {{ $key | quote }} + {{- end }} + {{- end }} + {{- $nodeSelector := default .Values.global.defaultNodeSelector .Values.nodeSelector -}} + {{- range $key, $val := $nodeSelector }} + - key: {{ $key }} + operator: In + values: + - {{ $val | quote }} + {{- end }} +{{- end }} + +{{- define "nodeAffinityPreferredDuringScheduling" }} + {{- range $key, $val := .Values.global.arch }} + {{- if gt ($val | int) 0 }} + - weight: {{ $val | int }} + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - {{ $key | quote }} + {{- end }} + {{- end }} +{{- end }} + +{{- define "podAntiAffinity" }} +{{- if or .Values.podAntiAffinityLabelSelector .Values.podAntiAffinityTermLabelSelector}} + podAntiAffinity: + {{- if .Values.podAntiAffinityLabelSelector }} + requiredDuringSchedulingIgnoredDuringExecution: + {{- include "podAntiAffinityRequiredDuringScheduling" . 
}} + {{- end }} + {{- if or .Values.podAntiAffinityTermLabelSelector}} + preferredDuringSchedulingIgnoredDuringExecution: + {{- include "podAntiAffinityPreferredDuringScheduling" . }} + {{- end }} +{{- end }} +{{- end }} + +{{- define "podAntiAffinityRequiredDuringScheduling" }} + {{- range $index, $item := .Values.podAntiAffinityLabelSelector }} + - labelSelector: + matchExpressions: + - key: {{ $item.key }} + operator: {{ $item.operator }} + {{- if $item.values }} + values: + {{- $vals := split "," $item.values }} + {{- range $i, $v := $vals }} + - {{ $v | quote }} + {{- end }} + {{- end }} + topologyKey: {{ $item.topologyKey }} + {{- end }} +{{- end }} + +{{- define "podAntiAffinityPreferredDuringScheduling" }} + {{- range $index, $item := .Values.podAntiAffinityTermLabelSelector }} + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: {{ $item.key }} + operator: {{ $item.operator }} + {{- if $item.values }} + values: + {{- $vals := split "," $item.values }} + {{- range $i, $v := $vals }} + - {{ $v | quote }} + {{- end }} + {{- end }} + topologyKey: {{ $item.topologyKey }} + weight: 100 + {{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/_helpers.tpl b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/_helpers.tpl new file mode 100755 index 000000000..56cfa7335 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "tracing.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "tracing.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/deployment.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/deployment.yaml new file mode 100755 index 000000000..25bb67fd3 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/deployment.yaml @@ -0,0 +1,86 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "tracing.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + app: {{ .Values.provider }} + template: + metadata: + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + sidecar.istio.io/inject: "false" + prometheus.io/scrape: "true" + prometheus.io/port: "14269" +{{- if .Values.jaeger.podAnnotations }} +{{ toYaml .Values.jaeger.podAnnotations | indent 8 }} +{{- end }} + spec: + containers: + - name: jaeger + image: "{{ template "system_default_registry" . 
}}{{ .Values.jaeger.repository }}:{{ .Values.jaeger.tag }}" + imagePullPolicy: {{ .Values.global.imagePullPolicy }} + env: + {{- if eq .Values.jaeger.spanStorageType "badger" }} + - name: BADGER_EPHEMERAL + value: "false" + - name: SPAN_STORAGE_TYPE + value: "badger" + - name: BADGER_DIRECTORY_VALUE + value: "/badger/data" + - name: BADGER_DIRECTORY_KEY + value: "/badger/key" + {{- end }} + - name: COLLECTOR_ZIPKIN_HTTP_PORT + value: "9411" + - name: MEMORY_MAX_TRACES + value: "{{ .Values.jaeger.memory.max_traces }}" + - name: QUERY_BASE_PATH + value: {{ if .Values.contextPath }} {{ .Values.contextPath }} {{ else }} /{{ .Values.provider }} {{ end }} + livenessProbe: + httpGet: + path: / + port: 14269 + readinessProbe: + httpGet: + path: / + port: 14269 +{{- if eq .Values.jaeger.spanStorageType "badger" }} + volumeMounts: + - name: data + mountPath: /badger +{{- end }} + resources: +{{- if .Values.jaeger.resources }} +{{ toYaml .Values.jaeger.resources | indent 12 }} +{{- else }} +{{ toYaml .Values.global.defaultResources | indent 12 }} +{{- end }} + affinity: + {{- include "nodeAffinity" . | indent 6 }} + {{- include "podAntiAffinity" . | indent 6 }} + {{- if .Values.global.rbac.pspEnabled }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + serviceAccountName: {{ include "tracing.fullname" . 
}} + {{- end }} +{{- if eq .Values.jaeger.spanStorageType "badger" }} + volumes: + - name: data +{{- if .Values.jaeger.persistentVolumeClaim.enabled }} + persistentVolumeClaim: + claimName: istio-jaeger-pvc +{{- else }} + emptyDir: {} +{{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/psp.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/psp.yaml new file mode 100755 index 000000000..44b230492 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/psp.yaml @@ -0,0 +1,86 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "tracing.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "tracing.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "tracing.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "tracing.fullname" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "tracing.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: +- apiGroups: + - policy + resourceNames: + - {{ include "tracing.fullname" . }} + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "tracing.fullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + forbiddenSysctls: + - '*' + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + runAsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - emptyDir + - secret + - persistentVolumeClaim +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/pvc.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/pvc.yaml new file mode 100755 index 000000000..9b4c55e4f --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/pvc.yaml @@ -0,0 +1,16 @@ +{{- if .Values.jaeger.persistentVolumeClaim.enabled }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: istio-jaeger-pvc + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} +spec: + storageClassName: {{ .Values.jaeger.storageClassName }} + accessModes: + - {{ .Values.jaeger.accessMode }} + resources: + requests: + storage: {{.Values.jaeger.persistentVolumeClaim.storage }} +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/service.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/service.yaml new file mode 100755 index 000000000..4210a9b5f --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/templates/service.yaml @@ -0,0 +1,63 @@ +apiVersion: v1 +kind: Service +metadata: + name: tracing + namespace: {{ .Release.Namespace }} + annotations: + {{- range $key, $val := .Values.service.annotations }} + {{ $key }}: {{ $val | quote }} + {{- end }} + labels: + app: {{ .Values.provider 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + type: {{ .Values.service.type }} + ports: + - name: {{ .Values.service.name }} + port: {{ .Values.service.externalPort }} + protocol: TCP + targetPort: 16686 + selector: + app: {{ .Values.provider }} +--- +# Jaeger implements the Zipkin API. To support swapping out the tracing backend, we use a Service named Zipkin. +apiVersion: v1 +kind: Service +metadata: + name: zipkin + namespace: {{ .Release.Namespace }} + labels: + name: zipkin + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + ports: + - name: {{ .Values.service.name }} + port: {{ .Values.zipkin.queryPort }} + targetPort: {{ .Values.zipkin.queryPort }} + selector: + app: {{ .Values.provider }} +--- +apiVersion: v1 +kind: Service +metadata: + name: jaeger-collector + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + type: ClusterIP + ports: + - name: jaeger-collector-http + port: 14268 + targetPort: 14268 + protocol: TCP + - name: jaeger-collector-grpc + port: 14250 + targetPort: 14250 + protocol: TCP + selector: + app: {{ .Values.provider }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/values.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/values.yaml new file mode 100755 index 000000000..18ff81c3c --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/charts/tracing/values.yaml @@ -0,0 +1,44 @@ +provider: jaeger +contextPath: "" +nodeSelector: {} +podAntiAffinityLabelSelector: [] +podAntiAffinityTermLabelSelector: [] +nameOverride: "" +fullnameOverride: "" + +global: + cattle: + systemDefaultRegistry: "" + defaultResources: {} + imagePullPolicy: IfNotPresent + imagePullSecrets: [] + arch: + amd64: 2 + s390x: 2 + ppc64le: 2 + defaultNodeSelector: {} + rbac: + pspEnabled: false + +jaeger: + repository: rancher/mirrored-jaegertracing-all-in-one + 
tag: 1.20.0 + # spanStorageType value can be "memory" and "badger" for all-in-one image + spanStorageType: badger + resources: + requests: + cpu: 10m + persistentVolumeClaim: + enabled: false + storage: 5Gi + storageClassName: "" + accessMode: ReadWriteMany + memory: + max_traces: 50000 +zipkin: + queryPort: 9411 +service: + annotations: {} + name: http-query + type: ClusterIP + externalPort: 16686 diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/configs/istio-base.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/configs/istio-base.yaml new file mode 100755 index 000000000..c5fa6f5f0 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/configs/istio-base.yaml @@ -0,0 +1,82 @@ +apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +spec: + components: + base: + enabled: {{ .Values.base.enabled }} + cni: + enabled: {{ .Values.cni.enabled }} + egressGateways: + - enabled: {{ .Values.egressGateways.enabled }} + name: istio-egressgateway + ingressGateways: + - enabled: {{ .Values.ingressGateways.enabled }} + name: istio-ingressgateway + k8s: + service: + ports: + - name: status-port + port: 15021 + targetPort: 15021 + - name: http2 + port: 80 + targetPort: 8080 + nodePort: 31380 + - name: https + port: 443 + targetPort: 8443 + nodePort: 31390 + - name: tcp + port: 31400 + targetPort: 31400 + nodePort: 31400 + - name: tls + port: 15443 + targetPort: 15443 + istiodRemote: + enabled: {{ .Values.istiodRemote.enabled }} + pilot: + enabled: {{ .Values.pilot.enabled }} + hub: {{ .Values.systemDefaultRegistry | default "docker.io" }} + profile: default + tag: {{ .Values.tag }} + revision: {{ .Values.revision }} + meshConfig: + defaultConfig: + proxyMetadata: + {{- if .Values.dns.enabled }} + ISTIO_META_DNS_CAPTURE: "true" + {{- end }} + values: + gateways: + istio-egressgateway: + name: istio-egressgateway + type: {{ .Values.egressGateways.type }} + istio-ingressgateway: + name: istio-ingressgateway + type: {{ .Values.ingressGateways.type }} + 
global: + istioNamespace: {{ template "istio.namespace" . }} + proxy: + image: {{ template "system_default_registry" . }}{{ .Values.global.proxy.repository }}:{{ .Values.global.proxy.tag }} + proxy_init: + image: {{ template "system_default_registry" . }}{{ .Values.global.proxy_init.repository }}:{{ .Values.global.proxy_init.tag }} + {{- if .Values.global.defaultPodDisruptionBudget.enabled }} + defaultPodDisruptionBudget: + enabled: {{ .Values.global.defaultPodDisruptionBudget.enabled }} + {{- end }} + {{- if .Values.pilot.enabled }} + pilot: + image: {{ template "system_default_registry" . }}{{ .Values.pilot.repository }}:{{ .Values.pilot.tag }} + {{- end }} + telemetry: + enabled: {{ .Values.telemetry.enabled }} + v2: + enabled: {{ .Values.telemetry.v2.enabled }} + {{- if .Values.cni.enabled }} + cni: + image: {{ template "system_default_registry" . }}{{ .Values.cni.repository }}:{{ .Values.cni.tag }} + excludeNamespaces: + {{- toYaml .Values.cni.excludeNamespaces | nindent 8 }} + logLevel: {{ .Values.cni.logLevel }} + {{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/requirements.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/requirements.yaml new file mode 100755 index 000000000..b60745780 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/requirements.yaml @@ -0,0 +1,17 @@ +dependencies: +- name: kiali + version: "" + repository: file://./charts/kiali + condition: kiali.enabled + tags: [] + enabled: false + importvalues: [] + alias: "" +- name: tracing + version: "" + repository: file://./charts/tracing + condition: tracing.enabled + tags: [] + enabled: false + importvalues: [] + alias: "" diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/samples/overlay-example.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/samples/overlay-example.yaml new file mode 100755 index 000000000..5cf3cf3b0 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/samples/overlay-example.yaml @@ -0,0 +1,37 @@ 
+apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +spec: + components: + ingressGateways: + - enabled: true + name: ilb-gateway + namespace: user-ingressgateway-ns + k8s: + resources: + requests: + cpu: 200m + service: + ports: + - name: tcp-citadel-grpc-tls + port: 8060 + targetPort: 8060 + - name: tcp-dns + port: 5353 + serviceAnnotations: + cloud.google.com/load-balancer-type: internal + - enabled: true + name: other-gateway + namespace: cattle-istio-system + k8s: + resources: + requests: + cpu: 200m + service: + ports: + - name: tcp-citadel-grpc-tls + port: 8060 + targetPort: 8060 + - name: tcp-dns + port: 5353 + serviceAnnotations: + cloud.google.com/load-balancer-type: internal diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/_helpers.tpl b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/_helpers.tpl new file mode 100755 index 000000000..3f7af953a --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/_helpers.tpl @@ -0,0 +1,12 @@ +{{/* Ensure namespace is set the same everywhere */}} +{{- define "istio.namespace" -}} + {{- .Release.Namespace | default "istio-system" -}} +{{- end -}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/admin-role.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/admin-role.yaml new file mode 100755 index 000000000..ad1313c4f --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/admin-role.yaml @@ -0,0 +1,43 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + name: istio-admin + namespace: {{ template "istio.namespace" . 
}} +rules: + - apiGroups: + - config.istio.io + resources: + - adapters + - attributemanifests + - handlers + - httpapispecbindings + - httpapispecs + - instances + - quotaspecbindings + - quotaspecs + - rules + - templates + verbs: ["get", "watch", "list"] + - apiGroups: + - networking.istio.io + resources: + - destinationrules + - envoyfilters + - gateways + - serviceentries + - sidecars + - virtualservices + - workloadentries + verbs: + - '*' + - apiGroups: + - security.istio.io + resources: + - authorizationpolicies + - peerauthentications + - requestauthentications + verbs: + - '*' diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/base-config-map.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/base-config-map.yaml new file mode 100755 index 000000000..5323917bc --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/base-config-map.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-installer-base + namespace: {{ template "istio.namespace" . }} +data: +{{ tpl (.Files.Glob "configs/*").AsConfig . 
| indent 2 }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/clusterrole.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/clusterrole.yaml new file mode 100755 index 000000000..a93b3df95 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/clusterrole.yaml @@ -0,0 +1,120 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-installer +rules: +# istio groups +- apiGroups: + - authentication.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - config.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - install.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - networking.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - rbac.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - security.istio.io + resources: + - '*' + verbs: + - '*' +# k8s groups +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions.apiextensions.k8s.io + - customresourcedefinitions + verbs: + - '*' +- apiGroups: + - apps + - extensions + resources: + - daemonsets + - deployments + - deployments/finalizers + - ingresses + - replicasets + - statefulsets + verbs: + - '*' +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - '*' +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - roles + - rolebindings + verbs: + - '*' +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - events + - namespaces + - pods + - pods/exec + - persistentvolumeclaims + - secrets + - services + - serviceaccounts + verbs: + - '*' 
+- apiGroups: + - policy + resourceNames: + - istio-installer + resources: + - podsecuritypolicies + verbs: + - use diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/clusterrolebinding.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/clusterrolebinding.yaml new file mode 100755 index 000000000..9d74a0434 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: istio-installer +subjects: +- kind: ServiceAccount + name: istio-installer + namespace: {{ template "istio.namespace" . }} +roleRef: + kind: ClusterRole + name: istio-installer + apiGroup: rbac.authorization.k8s.io diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/edit-role.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/edit-role.yaml new file mode 100755 index 000000000..d1059d58d --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/edit-role.yaml @@ -0,0 +1,43 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + namespace: {{ template "istio.namespace" . 
}} + name: istio-edit +rules: + - apiGroups: + - config.istio.io + resources: + - adapters + - attributemanifests + - handlers + - httpapispecbindings + - httpapispecs + - instances + - quotaspecbindings + - quotaspecs + - rules + - templates + verbs: ["get", "watch", "list"] + - apiGroups: + - networking.istio.io + resources: + - destinationrules + - envoyfilters + - gateways + - serviceentries + - sidecars + - virtualservices + - workloadentries + verbs: + - '*' + - apiGroups: + - security.istio.io + resources: + - authorizationpolicies + - peerauthentications + - requestauthentications + verbs: + - '*' diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-cni-psp.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-cni-psp.yaml new file mode 100755 index 000000000..5b94c8503 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-cni-psp.yaml @@ -0,0 +1,51 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: psp-istio-cni + namespace: {{ template "istio.namespace" . }} +spec: + allowPrivilegeEscalation: true + fsGroup: + rule: RunAsAny + hostNetwork: true + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - secret + - configMap + - emptyDir + - hostPath +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: psp-istio-cni + namespace: {{ template "istio.namespace" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: psp-istio-cni +subjects: + - kind: ServiceAccount + name: istio-cni +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: psp-istio-cni + namespace: {{ template "istio.namespace" . 
}} +rules: +- apiGroups: + - policy + resourceNames: + - psp-istio-cni + resources: + - podsecuritypolicies + verbs: + - use +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-install-job.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-install-job.yaml new file mode 100755 index 000000000..9a13f5698 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-install-job.yaml @@ -0,0 +1,50 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: istioctl-installer + namespace: {{ template "istio.namespace" . }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +spec: + backoffLimit: 1 + template: + spec: + containers: + - name: istioctl-installer + image: {{ template "system_default_registry" . }}{{ .Values.installer.repository }}:{{ .Values.installer.tag }} + env: + - name: RELEASE_NAME + value: {{ .Release.Name }} + - name: ISTIO_NAMESPACE + value: {{ template "istio.namespace" . 
}} + - name: FORCE_INSTALL + value: {{ .Values.forceInstall | default "false" | quote }} + command: ["/bin/sh","-c"] + args: ["/usr/local/app/scripts/run.sh"] + volumeMounts: + - name: config-volume + mountPath: /app/istio-base.yaml + subPath: istio-base.yaml + {{- if .Values.overlayFile }} + - name: overlay-volume + mountPath: /app/overlay-config.yaml + subPath: overlay-config.yaml + {{- end }} + volumes: + - name: config-volume + configMap: + name: istio-installer-base + {{- if .Values.overlayFile }} + - name: overlay-volume + configMap: + name: istio-installer-overlay + {{- end }} + serviceAccountName: istio-installer + {{- if .Values.global.rbac.pspEnabled }} + securityContext: + runAsUser: 101 + runAsGroup: 101 + {{- end }} + restartPolicy: Never diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-install-psp.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-install-psp.yaml new file mode 100755 index 000000000..f0b5ee565 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-install-psp.yaml @@ -0,0 +1,30 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: istio-installer + namespace: {{ template "istio.namespace" . 
}} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'configMap' + - 'secret' +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-psp.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-psp.yaml new file mode 100755 index 000000000..b3758b74f --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-psp.yaml @@ -0,0 +1,81 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: istio-psp + namespace: {{ template "istio.namespace" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: istio-psp +subjects: + - kind: ServiceAccount + name: istio-egressgateway-service-account + - kind: ServiceAccount + name: istio-ingressgateway-service-account + - kind: ServiceAccount + name: istio-mixer-service-account + - kind: ServiceAccount + name: istio-operator-authproxy + - kind: ServiceAccount + name: istiod-service-account + - kind: ServiceAccount + name: istio-sidecar-injector-service-account + - kind: ServiceAccount + name: istiocoredns-service-account + - kind: ServiceAccount + name: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: istio-psp + namespace: {{ template "istio.namespace" . }} +rules: +- apiGroups: + - policy + resourceNames: + - istio-psp + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: istio-psp + namespace: {{ template "istio.namespace" . 
}} +spec: + allowPrivilegeEscalation: false + forbiddenSysctls: + - '*' + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + runAsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI + - persistentVolumeClaim +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-uninstall-job.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-uninstall-job.yaml new file mode 100755 index 000000000..a7f156325 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/istio-uninstall-job.yaml @@ -0,0 +1,45 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: istioctl-uninstaller + namespace: {{ template "istio.namespace" . }} + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + template: + spec: + containers: + - name: istioctl-uninstaller + image: {{ template "system_default_registry" . }}{{ .Values.installer.repository }}:{{ .Values.installer.tag }} + env: + - name: RELEASE_NAME + value: {{ .Release.Name }} + - name: ISTIO_NAMESPACE + value: {{ template "istio.namespace" . 
}} + command: ["/bin/sh","-c"] + args: ["/usr/local/app/scripts/uninstall_istio_system.sh"] + volumeMounts: + - name: config-volume + mountPath: /app/istio-base.yaml + subPath: istio-base.yaml + {{- if .Values.overlayFile }} + - name: overlay-volume + mountPath: /app/overlay-config.yaml + subPath: overlay-config.yaml + {{ end }} + volumes: + - name: config-volume + configMap: + name: istio-installer-base + {{- if .Values.overlayFile }} + - name: overlay-volume + configMap: + name: istio-installer-overlay + {{ end }} + serviceAccountName: istio-installer + securityContext: + runAsUser: 101 + runAsGroup: 101 + restartPolicy: OnFailure diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/overlay-config-map.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/overlay-config-map.yaml new file mode 100755 index 000000000..287d26b2c --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/overlay-config-map.yaml @@ -0,0 +1,9 @@ +{{- if .Values.overlayFile }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-installer-overlay + namespace: {{ template "istio.namespace" . }} +data: + overlay-config.yaml: {{ toYaml .Values.overlayFile | indent 2 }} +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/service-monitors.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/service-monitors.yaml new file mode 100755 index 000000000..c3d60c4fc --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/service-monitors.yaml @@ -0,0 +1,51 @@ +{{- if .Values.kiali.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: envoy-stats-monitor + namespace: {{ template "istio.namespace" . 
}} + labels: + monitoring: istio-proxies +spec: + selector: + matchExpressions: + - {key: istio-prometheus-ignore, operator: DoesNotExist} + namespaceSelector: + any: true + jobLabel: envoy-stats + endpoints: + - path: /stats/prometheus + targetPort: 15090 + interval: 15s + relabelings: + - sourceLabels: [__meta_kubernetes_pod_container_port_name] + action: keep + regex: '.*-envoy-prom' + - action: labeldrop + regex: "__meta_kubernetes_pod_label_(.+)" + - sourceLabels: [__meta_kubernetes_namespace] + action: replace + targetLabel: namespace + - sourceLabels: [__meta_kubernetes_pod_name] + action: replace + targetLabel: pod_name +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: istio-component-monitor + namespace: {{ template "istio.namespace" . }} + labels: + monitoring: istio-components +spec: + jobLabel: istio + targetLabels: [app] + selector: + matchExpressions: + - {key: istio, operator: In, values: [pilot]} + namespaceSelector: + any: true + endpoints: + - port: http-monitoring + interval: 15s +{{- end -}} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/serviceaccount.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/serviceaccount.yaml new file mode 100755 index 000000000..82b6cbb7e --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-installer + namespace: {{ template "istio.namespace" . 
}} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/view-role.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/view-role.yaml new file mode 100755 index 000000000..5947d3eba --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/templates/view-role.yaml @@ -0,0 +1,41 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + namespace: {{ template "istio.namespace" . }} + name: istio-view +rules: + - apiGroups: + - config.istio.io + resources: + - adapters + - attributemanifests + - handlers + - httpapispecbindings + - httpapispecs + - instances + - quotaspecbindings + - quotaspecs + - rules + - templates + verbs: ["get", "watch", "list"] + - apiGroups: + - networking.istio.io + resources: + - destinationrules + - envoyfilters + - gateways + - serviceentries + - sidecars + - virtualservices + - workloadentries + verbs: ["get", "watch", "list"] + - apiGroups: + - security.istio.io + resources: + - authorizationpolicies + - peerauthentications + - requestauthentications + verbs: ["get", "watch", "list"] diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.300/values.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.300/values.yaml new file mode 100755 index 000000000..6c3f66797 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.300/values.yaml @@ -0,0 +1,85 @@ +overlayFile: "" +tag: 1.9.3 +##Setting forceInstall: true will remove the check for istio version < 1.6.x and will not analyze your install cluster prior to install +forceInstall: false + +installer: + repository: rancher/istio-installer + tag: 1.9.3-rancher2 + +##Native support for dns added in 1.8 +dns: + enabled: false + +base: + enabled: true + +cni: + enabled: false + repository: rancher/mirrored-istio-install-cni + tag: 1.9.3 + logLevel: info + excludeNamespaces: + - istio-system + - kube-system + +egressGateways: + enabled: false + type: NodePort + 
+ingressGateways: + enabled: true + type: NodePort + +istiodRemote: + enabled: false + +pilot: + enabled: true + repository: rancher/mirrored-istio-pilot + tag: 1.9.3 + +telemetry: + enabled: true + v2: + enabled: true + +global: + cattle: + systemDefaultRegistry: "" + proxy: + repository: rancher/mirrored-istio-proxyv2 + tag: 1.9.3 + proxy_init: + repository: rancher/mirrored-istio-proxyv2 + tag: 1.9.3 + defaultPodDisruptionBudget: + enabled: true + rbac: + pspEnabled: false + +# Kiali subchart from rancher-kiali-server +kiali: + enabled: true + auth: + strategy: anonymous + deployment: + ingress_enabled: false + repository: rancher/mirrored-kiali-kiali + tag: v1.32.0 + external_services: + prometheus: + custom_metrics_url: "http://rancher-monitoring-prometheus.cattle-monitoring-system.svc:9090" + url: "http://rancher-monitoring-prometheus.cattle-monitoring-system.svc:9090" + tracing: + in_cluster_url: "http://tracing.istio-system.svc:16686/jaeger" + grafana: + in_cluster_url: "http://rancher-monitoring-grafana.cattle-monitoring-system.svc:80" + url: "http://rancher-monitoring-grafana.cattle-monitoring-system.svc:80" + +tracing: + enabled: false + contextPath: "/jaeger" + jaeger: + repository: rancher/mirrored-jaegertracing-all-in-one + tag: 1.20.0 diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/Chart.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/Chart.yaml new file mode 100755 index 000000000..07d961be4 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/Chart.yaml @@ -0,0 +1,21 @@ +annotations: + catalog.cattle.io/auto-install: rancher-kiali-server-crd=1.32.100 + catalog.cattle.io/certified: rancher + catalog.cattle.io/display-name: Istio + catalog.cattle.io/namespace: istio-system + catalog.cattle.io/os: linux + catalog.cattle.io/provides-gvr: networking.istio.io.virtualservice/v1beta1 + catalog.cattle.io/release-name: rancher-istio + catalog.cattle.io/requests-cpu: 710m + catalog.cattle.io/requests-memory: 2314Mi + 
catalog.cattle.io/ui-component: istio +apiVersion: v1 +appVersion: 1.9.5 +description: A basic Istio setup that installs with the istioctl. Refer to https://istio.io/latest/ + for details. +icon: https://charts.rancher.io/assets/logos/istio.svg +keywords: +- networking +- infrastructure +name: rancher-istio +version: 1.9.500 diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/README.md b/charts/rancher-istio-1.9/rancher-istio/1.9.500/README.md new file mode 100755 index 000000000..199e45312 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/README.md @@ -0,0 +1,69 @@ +# Rancher Istio Installers + +A Rancher created chart that packages the istioctl binary to install via a helm chart. + +# Installation Requirements + +## Chart Dependencies +- rancher-kiali-server-crd chart + +# Uninstallation Requirements +To ensure rancher-istio uninstalls correctly, you must uninstall rancher-istio prior to uninstalling chart dependencies (see installation requirements for chart dependencies). This is because all definitions need to be available in order to properly build the rancher-istio objects for removal. + +If you remove dependent CRD charts prior to removing rancher-istio, you may encounter the following error:: + +`Error: uninstallation completed with 1 error(s): unable to build kubernetes objects for delete: unable to recognize "": no matches for kind "MonitoringDashboard" in version "monitoring.kiali.io/v1alpha1"` + +# Addons + +## Kiali + +Kiali allows you to view and manage your istio-based service mesh through an easy to use dashboard. + +#### Dependencies +- rancher-monitoring chart or other Prometheus installation + +This dependecy installs the required CRDs for installing Kiali. Since Kiali is bundled in with Istio in this chart, if you do not have these dependencies installed, your Istio installation will fail. If you do not plan on using Kiali, set `kiali.enabled=false` when installing Istio for a succesful installation. 
+ +> **Note:** The following configuration options assume you have installed the dependencies for Kiali. Please ensure you have Prometheus in your cluster before proceeding. + +The Monitoring app sets `prometheus.prometheusSpec.ignoreNamespaceSelectors=false` which means all namespaces will be scraped by Prometheus by default. This ensures you can view traffic, metrics and graphs for resources deployed in other namespaces. + +To limit scraping to specific namespaces, set `prometheus.prometheusSpec.ignoreNamespaceSelectors=true` and add one of the following configurations to ensure you can continue to view traffic, metrics and graphs for your deployed resources. + +1. Add a Service Monitor or Pod Monitor in the namespace with the targets you want to scrape. +1. Add an additionalScrapeConfig to your rancher-monitoring instance to scrape all targets in all namespaces. + +#### External Services + +##### Prometheus +The `kiali.external_services.prometheus` url is set in the values.yaml: +``` +http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc:{{ prometheus.service.port }} +``` +The url depends on the default values for `nameOverride`, `namespaceOverride`, and `prometheus.service.port` being set in your rancher-monitoring or other monitoring instance. + +##### Grafana +The `kiali.external_services.grafana` url is set in the values.yaml: +``` +http://{{ .Values.nameOverride }}-grafana.{{ .Values.namespaceOverride }}.svc:{{ grafana.service.port }} +``` +The url depends on the default values for `nameOverride`, `namespaceOverride`, and `grafana.service.port` being set in your rancher-monitoring or other monitoring instance. 
+ +##### Tracing +The `kiali.external_services.tracing` url and `.Values.tracing.contextPath` is set in the rancher-istio values.yaml: +``` +http://tracing.{{ .Values.namespaceOverride }}.svc:{{ .Values.service.externalPort }}/{{ .Values.tracing.contextPath }} +``` +The url depends on the default values for `namespaceOverride`, and `.Values.service.externalPort` being set in your rancher-tracing or other tracing instance. + +## Jaeger + +Jaeger allows you to trace and monitor distributed microservices. + +> **Note:** This addon is using the all-in-one Jaeger installation which is not qualified for production. Use the [Jaeger Tracing](https://www.jaegertracing.io/docs/1.21/getting-started/) documentation to determine which installation you will need for your production needs. + +# Installation +``` +helm install rancher-istio . --create-namespace -n istio-system +``` \ No newline at end of file diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/app-readme.md b/charts/rancher-istio-1.9/rancher-istio/1.9.500/app-readme.md new file mode 100755 index 000000000..0e42df083 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/app-readme.md @@ -0,0 +1,45 @@ +# Rancher Istio + +Our [Istio](https://istio.io/) installer wraps the istioctl binary commands in a handy helm chart, including an overlay file option to allow complex customization. It also includes: +* **[Kiali](https://kiali.io/)**: Used for graphing traffic flow throughout the mesh +* **[Jaeger](https://www.jaegertracing.io/)**: A quick start, all-in-one installation used for tracing distributed systems. This is not production qualified, please refer to jaeger documentation to determine which installation you may need instead. + +### Dependencies + +**Rancher Monitoring or other Prometheus installation** + +The Prometheus CRDs are required for installing Kiali which is enabled by default. If you do not have Prometheus installed your Istio installation will fail. 
If you do not plan on using Kiali, set `kiali.enabled=false` to bypass this requirement. + +### Customization + +**Rancher Monitoring** + +The Rancher Monitoring app sets `prometheus.prometheusSpec.ignoreNamespaceSelectors=false` which means all namespaces will be scraped by Prometheus by default. This ensures you can view traffic, metrics and graphs for resources deployed in other namespaces. + +To limit scraping to specific namespaces, set `prometheus.prometheusSpec.ignoreNamespaceSelectors=true` and add one of the following configurations to ensure you can continue to view traffic, metrics and graphs for your deployed resources. + +1. Add a Service Monitor or Pod Monitor in the namespace with the targets you want to scrape. +1. Add an additionalScrapeConfig to your rancher-monitoring instance to scrape all targets in all namespaces. + +**Custom Prometheus Installation with Kiali** + +To use a custom Monitoring installation, set the `kiali.external_services.prometheus` url in the values.yaml. This url depends on the values for `nameOverride`, `namespaceOverride`, and `prometheus.service.port` in your rancher-monitoring or other monitoring instance: +``` +http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc:{{ prometheus.service.port }} +``` +**Custom Grafana Installation with Kiali** + +To use a custom Grafana installation, set the `kiali.external_services.grafana` url in the values.yaml. This url depends on the values for `nameOverride`, `namespaceOverride`, and `grafana.service.port` in your rancher-monitoring or other grafana instance: +``` +http://{{ .Values.nameOverride }}-grafana.{{ .Values.namespaceOverride }}.svc:{{ grafana.service.port }} +``` +**Custom Tracing Installation with Kiali** + +To use a custom Tracing installation, set the `kiali.external_services.tracing` url and update the `.Values.tracing.contextPath` in the rancher-istio values.yaml. 
+ +This url depends on the values for `namespaceOverride`, and `.Values.service.externalPort` in your rancher-tracing or other tracing instance.: +``` +http://tracing.{{ .Values.namespaceOverride }}.svc:{{ .Values.service.externalPort }}/{{ .Values.tracing.contextPath }} +``` + +For more information on how to use the feature, refer to our [docs](https://rancher.com/docs/rancher/v2.x/en/istio/v2.5/). diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/Chart.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/Chart.yaml new file mode 100755 index 000000000..9b6fdf385 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/Chart.yaml @@ -0,0 +1,31 @@ +annotations: + catalog.cattle.io/auto-install: rancher-kiali-server-crd=match + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.cattle.io/provides-gvr: monitoringdashboards.monitoring.kiali.io/v1alpha1 + catalog.cattle.io/requires-gvr: monitoring.coreos.com.prometheus/v1 + catalog.rancher.io/namespace: cattle-istio-system + catalog.rancher.io/release-name: rancher-kiali-server +apiVersion: v2 +appVersion: v1.32.0 +description: Kiali is an open source project for service mesh observability, refer + to https://www.kiali.io for details. This is installed as sub-chart with customized + values in Rancher's Istio. 
+home: https://github.com/kiali/kiali +icon: https://raw.githubusercontent.com/kiali/kiali.io/master/themes/kiali/static/img/kiali_logo_masthead.png +keywords: +- istio +- kiali +- networking +- infrastructure +maintainers: +- email: kiali-users@googlegroups.com + name: Kiali + url: https://kiali.io +name: kiali +sources: +- https://github.com/kiali/kiali +- https://github.com/kiali/kiali-ui +- https://github.com/kiali/kiali-operator +- https://github.com/kiali/helm-charts +version: 1.32.1 diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/NOTES.txt b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/NOTES.txt new file mode 100755 index 000000000..751019401 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/NOTES.txt @@ -0,0 +1,5 @@ +Welcome to Kiali! For more details on Kiali, see: https://kiali.io + +The Kiali Server [{{ .Chart.AppVersion }}] has been installed in namespace [{{ .Release.Namespace }}]. It will be ready soon. + +(Helm: Chart=[{{ .Chart.Name }}], Release=[{{ .Release.Name }}], Version=[{{ .Chart.Version }}]) diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/_helpers.tpl b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/_helpers.tpl new file mode 100755 index 000000000..dd33bbe48 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/_helpers.tpl @@ -0,0 +1,192 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "kiali-server.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "kiali-server.fullname" -}} +{{- if .Values.fullnameOverride }} + {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} + {{- $name := default .Chart.Name .Values.nameOverride }} + {{- printf "%s" $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kiali-server.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Identifies the log_level with the old verbose_mode and the new log_level considered. +*/}} +{{- define "kiali-server.logLevel" -}} +{{- if .Values.deployment.verbose_mode -}} +{{- .Values.deployment.verbose_mode -}} +{{- else -}} +{{- .Values.deployment.logger.log_level -}} +{{- end -}} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "kiali-server.labels" -}} +helm.sh/chart: {{ include "kiali-server.chart" . }} +app: {{ include "kiali-server.name" . }} +{{ include "kiali-server.selectorLabels" . }} +version: {{ .Values.deployment.version_label | default .Chart.AppVersion | quote }} +app.kubernetes.io/version: {{ .Values.deployment.version_label | default .Chart.AppVersion | quote }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/part-of: "kiali" +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "kiali-server.selectorLabels" -}} +app.kubernetes.io/name: {{ include "kiali-server.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Used to determine if a custom dashboard (defined in .Template.Name) should be deployed. +*/}} +{{- define "kiali-server.isDashboardEnabled" -}} +{{- if .Values.external_services.custom_dashboards.enabled }} + {{- $includere := "" }} + {{- range $_, $s := .Values.deployment.custom_dashboards.includes }} + {{- if $s }} + {{- if $includere }} + {{- $includere = printf "%s|^%s$" $includere ($s | replace "*" ".*" | replace "?" 
".") }} + {{- else }} + {{- $includere = printf "^%s$" ($s | replace "*" ".*" | replace "?" ".") }} + {{- end }} + {{- end }} + {{- end }} + {{- $excludere := "" }} + {{- range $_, $s := .Values.deployment.custom_dashboards.excludes }} + {{- if $s }} + {{- if $excludere }} + {{- $excludere = printf "%s|^%s$" $excludere ($s | replace "*" ".*" | replace "?" ".") }} + {{- else }} + {{- $excludere = printf "^%s$" ($s | replace "*" ".*" | replace "?" ".") }} + {{- end }} + {{- end }} + {{- end }} + {{- if (and (mustRegexMatch (default "no-matches" $includere) (base .Template.Name)) (not (mustRegexMatch (default "no-matches" $excludere) (base .Template.Name)))) }} + {{- print "enabled" }} + {{- else }} + {{- print "" }} + {{- end }} +{{- else }} + {{- print "" }} +{{- end }} +{{- end }} + +{{/* +Determine the default login token signing key. +*/}} +{{- define "kiali-server.login_token.signing_key" -}} +{{- if .Values.login_token.signing_key }} + {{- .Values.login_token.signing_key }} +{{- else }} + {{- randAlphaNum 16 }} +{{- end }} +{{- end }} + +{{/* +Determine the default web root. +*/}} +{{- define "kiali-server.server.web_root" -}} +{{- if .Values.server.web_root }} + {{- .Values.server.web_root | trimSuffix "/" }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- "/" }} + {{- else }} + {{- "/kiali" }} + {{- end }} +{{- end }} +{{- end }} + +{{/* +Determine the default identity cert file. There is no default if on k8s; only on OpenShift. +*/}} +{{- define "kiali-server.identity.cert_file" -}} +{{- if hasKey .Values.identity "cert_file" }} + {{- .Values.identity.cert_file }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- "/kiali-cert/tls.crt" }} + {{- else }} + {{- "" }} + {{- end }} +{{- end }} +{{- end }} + +{{/* +Determine the default identity private key file. There is no default if on k8s; only on OpenShift. 
+*/}} +{{- define "kiali-server.identity.private_key_file" -}} +{{- if hasKey .Values.identity "private_key_file" }} + {{- .Values.identity.private_key_file }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- "/kiali-cert/tls.key" }} + {{- else }} + {{- "" }} + {{- end }} +{{- end }} +{{- end }} + +{{/* +Determine the istio namespace - default is where Kiali is installed. +*/}} +{{- define "kiali-server.istio_namespace" -}} +{{- if .Values.istio_namespace }} + {{- .Values.istio_namespace }} +{{- else }} + {{- .Release.Namespace }} +{{- end }} +{{- end }} + +{{/* +Determine the auth strategy to use - default is "token" on Kubernetes and "openshift" on OpenShift. +*/}} +{{- define "kiali-server.auth.strategy" -}} +{{- if .Values.auth.strategy }} + {{- if (and (eq .Values.auth.strategy "openshift") (not .Values.kiali_route_url)) }} + {{- fail "You did not define what the Kiali Route URL will be (--set kiali_route_url=...). Without this set, the openshift auth strategy will not work. Either set that or use a different auth strategy via the --set auth.strategy=... option." }} + {{- end }} + {{- .Values.auth.strategy }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- if not .Values.kiali_route_url }} + {{- fail "You did not define what the Kiali Route URL will be (--set kiali_route_url=...). Without this set, the openshift auth strategy will not work. Either set that or explicitly indicate another auth strategy you want via the --set auth.strategy=... option." 
}} + {{- end }} + {{- "openshift" }} + {{- else }} + {{- "token" }} + {{- end }} +{{- end }} +{{- end }} + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/cabundle.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/cabundle.yaml new file mode 100755 index 000000000..7462b95a7 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/cabundle.yaml @@ -0,0 +1,13 @@ +{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kiali-server.fullname" . }}-cabundle + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + annotations: + service.beta.openshift.io/inject-cabundle: "true" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/configmap.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/configmap.yaml new file mode 100755 index 000000000..b1bf53173 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/configmap.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +data: + config.yaml: | + {{- /* Most of .Values is simply the ConfigMap - strip out the keys that are not part of the ConfigMap */}} + {{- $cm := omit .Values "nameOverride" "fullnameOverride" "kiali_route_url" }} + {{- /* The helm chart defines namespace for us, but pass it to the ConfigMap in case the server needs it */}} + {{- $_ := set $cm.deployment "namespace" .Release.Namespace }} + {{- /* Some values of the ConfigMap are generated, but might not be identical, from .Values */}} + {{- $_ := set $cm "istio_namespace" (include "kiali-server.istio_namespace" .) }} + {{- $_ := set $cm.auth "strategy" (include "kiali-server.auth.strategy" .) }} + {{- $_ := set $cm.auth.openshift "client_id_prefix" (include "kiali-server.fullname" .) }} + {{- $_ := set $cm.identity "cert_file" (include "kiali-server.identity.cert_file" .) }} + {{- $_ := set $cm.identity "private_key_file" (include "kiali-server.identity.private_key_file" .) }} + {{- $_ := set $cm.login_token "signing_key" (include "kiali-server.login_token.signing_key" .) }} + {{- $_ := set $cm.server "web_root" (include "kiali-server.server.web_root" .) }} + {{- toYaml $cm | nindent 4 }} +... diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/envoy.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/envoy.yaml new file mode 100755 index 000000000..85b402017 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/envoy.yaml @@ -0,0 +1,56 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: envoy + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: Envoy Metrics + discoverOn: "envoy_server_uptime" + items: + - chart: + name: "Pods uptime" + spans: 4 + metricName: "envoy_server_uptime" + dataType: "raw" + - chart: + name: "Allocated memory" + unit: "bytes" + spans: 4 + metricName: "envoy_server_memory_allocated" + dataType: "raw" + min: 0 + - chart: + name: "Heap size" + unit: "bytes" + spans: 4 + metricName: "envoy_server_memory_heap_size" + dataType: "raw" + min: 0 + - chart: + name: "Upstream active connections" + spans: 6 + metricName: "envoy_cluster_upstream_cx_active" + dataType: "raw" + - chart: + name: "Upstream total requests" + spans: 6 + metricName: "envoy_cluster_upstream_rq_total" + unit: "rps" + dataType: "rate" + - chart: + name: "Downstream active connections" + spans: 6 + metricName: "envoy_listener_downstream_cx_active" + dataType: "raw" + - chart: + name: "Downstream HTTP requests" + spans: 6 + metricName: "envoy_listener_http_downstream_rq" + unit: "rps" + dataType: "rate" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/go.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/go.yaml new file mode 100755 index 000000000..2d2f42a93 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/go.yaml @@ -0,0 +1,67 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: go + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: Go Metrics + runtime: Go + discoverOn: "go_info" + items: + - chart: + name: "CPU ratio" + spans: 6 + metricName: "process_cpu_seconds_total" + dataType: "rate" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "RSS Memory" + unit: "bytes" + spans: 6 + metricName: "process_resident_memory_bytes" + dataType: "raw" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "Goroutines" + spans: 6 + metricName: "go_goroutines" + dataType: "raw" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "Heap allocation rate" + unit: "bytes/s" + spans: 6 + metricName: "go_memstats_alloc_bytes_total" + dataType: "rate" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "GC rate" + spans: 6 + metricName: "go_gc_duration_seconds_count" + dataType: "rate" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "Next GC" + unit: "bytes" + spans: 6 + metricName: "go_memstats_next_gc_bytes" + dataType: "raw" + aggregations: + - label: "pod_name" + displayName: "Pod" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/kiali.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/kiali.yaml new file mode 100755 index 000000000..b1f011b4f --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/kiali.yaml @@ -0,0 +1,44 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: kiali + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: Kiali Internal Metrics + items: + - chart: + name: "API processing duration" + unit: "seconds" + spans: 6 + metricName: "kiali_api_processing_duration_seconds" + dataType: "histogram" + aggregations: + - label: "route" + displayName: "Route" + - chart: + name: "Functions processing duration" + unit: "seconds" + spans: 6 + metricName: "kiali_go_function_processing_duration_seconds" + dataType: "histogram" + aggregations: + - label: "function" + displayName: "Function" + - label: "package" + displayName: "Package" + - chart: + name: "Failures" + spans: 12 + metricName: "kiali_go_function_failures_total" + dataType: "raw" + aggregations: + - label: "function" + displayName: "Function" + - label: "package" + displayName: "Package" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml new file mode 100755 index 000000000..2e1ed5cff --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml @@ -0,0 +1,43 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: micrometer-1.0.6-jvm-pool + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: JVM + title: JVM Pool Metrics + discoverOn: "jvm_buffer_total_capacity_bytes" + items: + - chart: + name: "Pool buffer memory used" + unit: "bytes" + spans: 4 + metricName: "jvm_buffer_memory_used_bytes" + dataType: "raw" + aggregations: + - label: "id" + displayName: "Pool" + - chart: + name: "Pool buffer capacity" + unit: "bytes" + spans: 4 + metricName: "jvm_buffer_total_capacity_bytes" + dataType: "raw" + aggregations: + - label: "id" + displayName: "Pool" + - chart: + name: "Pool buffer count" + unit: "bytes" + spans: 4 + metricName: "jvm_buffer_count" + dataType: "raw" + aggregations: + - label: "id" + displayName: "Pool" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm.yaml new file mode 100755 index 000000000..d64596882 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/micrometer-1.0.6-jvm.yaml @@ -0,0 +1,65 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: micrometer-1.0.6-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: JVM + title: JVM Metrics + discoverOn: "jvm_threads_live" + items: + - chart: + name: "Total live threads" + spans: 4 + metricName: "jvm_threads_live" + dataType: "raw" + - chart: + name: "Daemon threads" + spans: 4 + metricName: "jvm_threads_daemon" + dataType: "raw" + - chart: + name: "Loaded classes" + spans: 4 + metricName: "jvm_classes_loaded" + dataType: "raw" + + - chart: + name: "Memory used" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_used_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory commited" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_committed_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory max" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_max_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/micrometer-1.1-jvm.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/micrometer-1.1-jvm.yaml new file mode 100755 index 000000000..76e8d0a4a --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/micrometer-1.1-jvm.yaml @@ -0,0 +1,68 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: micrometer-1.1-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: JVM + title: JVM Metrics + discoverOn: "jvm_threads_live_threads" + items: + - chart: + name: "Memory used" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_used_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory commited" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_committed_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory max" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_max_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + + - chart: + name: "Total live threads" + spans: 4 + metricName: "jvm_threads_live_threads" + dataType: "raw" + - chart: + name: "Daemon threads" + spans: 4 + metricName: "jvm_threads_daemon_threads" + dataType: "raw" + - chart: + name: "Threads states" + spans: 4 + metricName: "jvm_threads_states_threads" + dataType: "raw" + aggregations: + - label: "state" + displayName: "State" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/microprofile-1.1.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/microprofile-1.1.yaml new file mode 100755 index 000000000..1d4951196 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/microprofile-1.1.yaml @@ -0,0 +1,59 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: microprofile-1.1 + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: MicroProfile Metrics + runtime: MicroProfile + discoverOn: "base:thread_count" + items: + - chart: + name: "Current loaded classes" + spans: 6 + metricName: "base:classloader_current_loaded_class_count" + dataType: "raw" + - chart: + name: "Unloaded classes" + spans: 6 + metricName: "base:classloader_total_unloaded_class_count" + dataType: "raw" + - chart: + name: "Thread count" + spans: 4 + metricName: "base:thread_count" + dataType: "raw" + - chart: + name: "Thread max count" + spans: 4 + metricName: "base:thread_max_count" + dataType: "raw" + - chart: + name: "Thread daemon count" + spans: 4 + metricName: "base:thread_daemon_count" + dataType: "raw" + - chart: + name: "Committed heap" + unit: "bytes" + spans: 4 + metricName: "base:memory_committed_heap_bytes" + dataType: "raw" + - chart: + name: "Max heap" + unit: "bytes" + spans: 4 + metricName: "base:memory_max_heap_bytes" + dataType: "raw" + - chart: + name: "Used heap" + unit: "bytes" + spans: 4 + metricName: "base:memory_used_heap_bytes" + dataType: "raw" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/microprofile-x.y.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/microprofile-x.y.yaml new file mode 100755 index 000000000..57ddc60ef --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/microprofile-x.y.yaml @@ -0,0 +1,38 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: microprofile-x.y + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: MicroProfile Metrics + runtime: MicroProfile + discoverOn: "base:gc_complete_scavenger_count" + items: + - chart: + name: "Young GC time" + unit: "seconds" + spans: 3 + metricName: "base:gc_young_generation_scavenger_time_seconds" + dataType: "raw" + - chart: + name: "Young GC count" + spans: 3 + metricName: "base:gc_young_generation_scavenger_count" + dataType: "raw" + - chart: + name: "Total GC time" + unit: "seconds" + spans: 3 + metricName: "base:gc_complete_scavenger_time_seconds" + dataType: "raw" + - chart: + name: "Total GC count" + spans: 3 + metricName: "base:gc_complete_scavenger_count" + dataType: "raw" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/nodejs.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/nodejs.yaml new file mode 100755 index 000000000..1ffe0aa10 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/nodejs.yaml @@ -0,0 +1,59 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: nodejs + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Node.js + title: Node.js Metrics + discoverOn: "nodejs_active_handles_total" + items: + - chart: + name: "Active handles" + spans: 4 + metricName: "nodejs_active_handles_total" + dataType: "raw" + - chart: + name: "Active requests" + spans: 4 + metricName: "nodejs_active_requests_total" + dataType: "raw" + - chart: + name: "Event loop lag" + unit: "seconds" + spans: 4 + metricName: "nodejs_eventloop_lag_seconds" + dataType: "raw" + - chart: + name: "Total heap size" + unit: "bytes" + spans: 12 + metricName: "nodejs_heap_space_size_total_bytes" + dataType: "raw" + aggregations: + - label: "space" + displayName: "Space" + - chart: + name: "Used heap size" + unit: "bytes" + spans: 6 + metricName: "nodejs_heap_space_size_used_bytes" + dataType: "raw" + aggregations: + - label: "space" + displayName: "Space" + - chart: + name: "Available heap size" + unit: "bytes" + spans: 6 + metricName: "nodejs_heap_space_size_available_bytes" + dataType: "raw" + aggregations: + - label: "space" + displayName: "Space" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/quarkus.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/quarkus.yaml new file mode 100755 index 000000000..cef5f3dce --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/quarkus.yaml @@ -0,0 +1,33 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: quarkus + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: Quarkus Metrics + runtime: Quarkus + items: + - chart: + name: "Thread count" + spans: 4 + metricName: "vendor:thread_count" + dataType: "raw" + - chart: + name: "Used heap" + unit: "bytes" + spans: 4 + metricName: "vendor:memory_heap_usage_bytes" + dataType: "raw" + - chart: + name: "Used non-heap" + unit: "bytes" + spans: 4 + metricName: "vendor:memory_non_heap_usage_bytes" + dataType: "raw" + - include: "microprofile-x.y" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/springboot-jvm-pool.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/springboot-jvm-pool.yaml new file mode 100755 index 000000000..42d87d890 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/springboot-jvm-pool.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: springboot-jvm-pool + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Spring Boot + title: JVM Pool Metrics + items: + - include: "micrometer-1.0.6-jvm-pool" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/springboot-jvm.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/springboot-jvm.yaml new file mode 100755 index 000000000..ced3acdd9 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/springboot-jvm.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: springboot-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Spring Boot + title: JVM Metrics + items: + - include: "micrometer-1.0.6-jvm" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/springboot-tomcat.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/springboot-tomcat.yaml new file mode 100755 index 000000000..c07016aa2 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/springboot-tomcat.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: springboot-tomcat + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Spring Boot + title: Tomcat Metrics + items: + - include: "tomcat" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/thorntail.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/thorntail.yaml new file mode 100755 index 000000000..6bd85e6f5 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/thorntail.yaml @@ -0,0 +1,22 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: thorntail + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Thorntail + title: Thorntail Metrics + discoverOn: "vendor:loaded_modules" + items: + - include: "microprofile-1.1" + - chart: + name: "Loaded modules" + spans: 6 + metricName: "vendor:loaded_modules" + dataType: "raw" +... 
+{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/tomcat.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/tomcat.yaml new file mode 100755 index 000000000..9a803342f --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/tomcat.yaml @@ -0,0 +1,67 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: tomcat + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Tomcat + title: Tomcat Metrics + discoverOn: "tomcat_sessions_created_total" + items: + - chart: + name: "Sessions created" + spans: 4 + metricName: "tomcat_sessions_created_total" + dataType: "raw" + - chart: + name: "Active sessions" + spans: 4 + metricName: "tomcat_sessions_active_current" + dataType: "raw" + - chart: + name: "Sessions rejected" + spans: 4 + metricName: "tomcat_sessions_rejected_total" + dataType: "raw" + + - chart: + name: "Bytes sent" + unit: "bitrate" + spans: 6 + metricName: "tomcat_global_sent_bytes_total" + dataType: "rate" + aggregations: + - label: "name" + displayName: "Name" + - chart: + name: "Bytes received" + unit: "bitrate" + spans: 6 + metricName: "tomcat_global_received_bytes_total" + dataType: "rate" + aggregations: + - label: "name" + displayName: "Name" + + - chart: + name: "Global errors" + spans: 6 + metricName: "tomcat_global_error_total" + dataType: "raw" + aggregations: + - label: "name" + displayName: "Name" + - chart: + name: "Servlet errors" + spans: 6 + metricName: "tomcat_servlet_error_total" + dataType: "raw" + aggregations: + - label: "name" + displayName: "Name" +... 
+{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-client.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-client.yaml new file mode 100755 index 000000000..2d591d6b0 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-client.yaml @@ -0,0 +1,60 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-client + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Client Metrics + discoverOn: "vertx_http_client_connections" + items: + - chart: + name: "Client response time" + unit: "seconds" + spans: 6 + metricName: "vertx_http_client_responseTime_seconds" + dataType: "histogram" + aggregations: + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Client request count rate" + unit: "ops" + spans: 6 + metricName: "vertx_http_client_requestCount_total" + dataType: "rate" + aggregations: + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Client active connections" + spans: 6 + metricName: "vertx_http_client_connections" + dataType: "raw" + - chart: + name: "Client active websockets" + spans: 6 + metricName: "vertx_http_client_wsConnections" + dataType: "raw" + - chart: + name: "Client bytes sent" + unit: "bytes" + spans: 6 + metricName: "vertx_http_client_bytesSent" + dataType: "histogram" + - chart: + name: "Client bytes received" + unit: "bytes" + spans: 6 + metricName: "vertx_http_client_bytesReceived" + dataType: "histogram" +... 
+{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-eventbus.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-eventbus.yaml new file mode 100755 index 000000000..65f9ee2ec --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-eventbus.yaml @@ -0,0 +1,59 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-eventbus + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Eventbus Metrics + discoverOn: "vertx_eventbus_handlers" + items: + - chart: + name: "Event bus handlers" + spans: 6 + metricName: "vertx_eventbus_handlers" + dataType: "raw" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus pending messages" + spans: 6 + metricName: "vertx_eventbus_pending" + dataType: "raw" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus processing time" + unit: "seconds" + spans: 6 + metricName: "vertx_eventbus_processingTime_seconds" + dataType: "histogram" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus bytes read" + unit: "bytes" + spans: 6 + metricName: "vertx_eventbus_bytesRead" + dataType: "histogram" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus bytes written" + unit: "bytes" + spans: 6 + metricName: "vertx_eventbus_bytesWritten" + dataType: "histogram" + aggregations: + - label: "address" + displayName: "Eventbus address" +... 
+{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-jvm.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-jvm.yaml new file mode 100755 index 000000000..2663186f3 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-jvm.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Vert.x + title: JVM Metrics + items: + - include: "micrometer-1.1-jvm" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-pool.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-pool.yaml new file mode 100755 index 000000000..f6af921b3 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-pool.yaml @@ -0,0 +1,68 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-pool + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Pools Metrics + discoverOn: "vertx_pool_ratio" + items: + - chart: + name: "Usage duration" + unit: "seconds" + spans: 6 + metricName: "vertx_pool_usage_seconds" + dataType: "histogram" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Usage ratio" + spans: 6 + metricName: "vertx_pool_ratio" + dataType: "raw" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Queue size" + spans: 6 + metricName: "vertx_pool_queue_size" + dataType: "raw" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Time in queue" + unit: "seconds" + spans: 6 + metricName: "vertx_pool_queue_delay_seconds" + dataType: "histogram" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Resources used" + spans: 6 + metricName: "vertx_pool_inUse" + dataType: "raw" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-server.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-server.yaml new file mode 100755 index 000000000..de6b89df9 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/dashboards/vertx-server.yaml @@ -0,0 +1,62 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-server + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Server Metrics + discoverOn: "vertx_http_server_connections" + items: + - chart: + name: "Server response time" + unit: "seconds" + spans: 6 + metricName: "vertx_http_server_responseTime_seconds" + dataType: "histogram" + aggregations: + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Server request count rate" + unit: "ops" + spans: 6 + metricName: "vertx_http_server_requestCount_total" + dataType: "rate" + aggregations: + - label: "code" + displayName: "Error code" + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Server active connections" + spans: 6 + metricName: "vertx_http_server_connections" + dataType: "raw" + - chart: + name: "Server active websockets" + spans: 6 + metricName: "vertx_http_server_wsConnections" + dataType: "raw" + - chart: + name: "Server bytes sent" + unit: "bytes" + spans: 6 + metricName: "vertx_http_server_bytesSent" + dataType: "histogram" + - chart: + name: "Server bytes received" + unit: "bytes" + spans: 6 + metricName: "vertx_http_server_bytesReceived" + dataType: "histogram" +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/deployment.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/deployment.yaml new file mode 100755 index 000000000..100c57922 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/deployment.yaml @@ -0,0 +1,174 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.deployment.replicas }} + selector: + matchLabels: + {{- include "kiali-server.selectorLabels" . 
| nindent 6 }} + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + name: {{ include "kiali-server.fullname" . }} + labels: + {{- include "kiali-server.labels" . | nindent 8 }} + {{- if .Values.deployment.pod_labels }} + {{- toYaml .Values.deployment.pod_labels | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.server.metrics_enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ .Values.server.metrics_port | quote }} + {{- else }} + prometheus.io/scrape: "false" + prometheus.io/port: "" + {{- end }} + kiali.io/runtimes: go,kiali + {{- if .Values.deployment.pod_annotations }} + {{- toYaml .Values.deployment.pod_annotations | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "kiali-server.fullname" . }} + {{- if .Values.deployment.priority_class_name }} + priorityClassName: {{ .Values.deployment.priority_class_name | quote }} + {{- end }} + {{- if .Values.deployment.image_pull_secrets }} + imagePullSecrets: + {{- range .Values.deployment.image_pull_secrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - image: "{{ template "system_default_registry" . }}{{ .Values.deployment.repository }}:{{ .Values.deployment.tag }}" + imagePullPolicy: {{ .Values.deployment.image_pull_policy | default "Always" }} + name: {{ include "kiali-server.fullname" . }} + command: + - "/opt/kiali/kiali" + - "-config" + - "/kiali-configuration/config.yaml" + ports: + - name: api-port + containerPort: {{ .Values.server.port | default 20001 }} + {{- if .Values.server.metrics_enabled }} + - name: http-metrics + containerPort: {{ .Values.server.metrics_port | default 9090 }} + {{- end }} + readinessProbe: + httpGet: + path: {{ include "kiali-server.server.web_root" . | trimSuffix "/" }}/healthz + port: api-port + {{- if (include "kiali-server.identity.cert_file" .) 
}} + scheme: HTTPS + {{- else }} + scheme: HTTP + {{- end }} + initialDelaySeconds: 5 + periodSeconds: 30 + livenessProbe: + httpGet: + path: {{ include "kiali-server.server.web_root" . | trimSuffix "/" }}/healthz + port: api-port + {{- if (include "kiali-server.identity.cert_file" .) }} + scheme: HTTPS + {{- else }} + scheme: HTTP + {{- end }} + initialDelaySeconds: 5 + periodSeconds: 30 + env: + - name: ACTIVE_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "{{ include "kiali-server.logLevel" . }}" + - name: LOG_FORMAT + value: "{{ .Values.deployment.logger.log_format }}" + - name: LOG_TIME_FIELD_FORMAT + value: "{{ .Values.deployment.logger.time_field_format }}" + - name: LOG_SAMPLER_RATE + value: "{{ .Values.deployment.logger.sampler_rate }}" + volumeMounts: + {{- if .Values.web_root_override }} + - name: kiali-console + subPath: env.js + mountPath: /opt/kiali/console/env.js + {{- end }} + - name: {{ include "kiali-server.fullname" . }}-configuration + mountPath: "/kiali-configuration" + - name: {{ include "kiali-server.fullname" . }}-cert + mountPath: "/kiali-cert" + - name: {{ include "kiali-server.fullname" . }}-secret + mountPath: "/kiali-secret" + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + - name: {{ include "kiali-server.fullname" . }}-cabundle + mountPath: "/kiali-cabundle" + {{- end }} + {{- if .Values.deployment.resources }} + resources: + {{- toYaml .Values.deployment.resources | nindent 10 }} + {{- end }} + volumes: + {{- if .Values.web_root_override }} + - name: kiali-console + configMap: + name: kiali-console + items: + - key: env.js + path: env.js + {{- end }} + - name: {{ include "kiali-server.fullname" . }}-configuration + configMap: + name: {{ include "kiali-server.fullname" . }} + - name: {{ include "kiali-server.fullname" . }}-cert + secret: + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + secretName: {{ include "kiali-server.fullname" . 
}}-cert-secret + {{- else }} + secretName: istio.{{ include "kiali-server.fullname" . }}-service-account + {{- end }} + {{- if not (include "kiali-server.identity.cert_file" .) }} + optional: true + {{- end }} + - name: {{ include "kiali-server.fullname" . }}-secret + secret: + secretName: {{ .Values.deployment.secret_name }} + optional: true + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + - name: {{ include "kiali-server.fullname" . }}-cabundle + configMap: + name: {{ include "kiali-server.fullname" . }}-cabundle + {{- end }} + {{- if or (.Values.deployment.affinity.node) (or (.Values.deployment.affinity.pod) (.Values.deployment.affinity.pod_anti)) }} + affinity: + {{- if .Values.deployment.affinity.node }} + nodeAffinity: + {{- toYaml .Values.deployment.affinity.node | nindent 10 }} + {{- end }} + {{- if .Values.deployment.affinity.pod }} + podAffinity: + {{- toYaml .Values.deployment.affinity.pod | nindent 10 }} + {{- end }} + {{- if .Values.deployment.affinity.pod_anti }} + podAntiAffinity: + {{- toYaml .Values.deployment.affinity.pod_anti | nindent 10 }} + {{- end }} + {{- end }} + {{- if .Values.deployment.tolerations }} + tolerations: + {{- toYaml .Values.deployment.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.deployment.node_selector }} + nodeSelector: + {{- toYaml .Values.deployment.node_selector | nindent 8 }} + {{- end }} +... diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/hpa.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/hpa.yaml new file mode 100755 index 000000000..934c4c1e9 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/hpa.yaml @@ -0,0 +1,17 @@ +{{- if .Values.deployment.hpa.spec }} +--- +apiVersion: {{ .Values.deployment.hpa.api_version }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "kiali-server.fullname" . }} + {{- toYaml .Values.deployment.hpa.spec | nindent 2 }} +... +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/ingress.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/ingress.yaml new file mode 100755 index 000000000..e4c98db1b --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/ingress.yaml @@ -0,0 +1,40 @@ +{{- if not (.Capabilities.APIVersions.Has "route.openshift.io/v1") }} +{{- if .Values.deployment.ingress_enabled }} +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + annotations: + {{- if hasKey .Values.deployment.override_ingress_yaml.metadata "annotations" }} + {{- toYaml .Values.deployment.override_ingress_yaml.metadata.annotations | nindent 4 }} + {{- else }} + # For ingress-nginx versions older than 0.20.0 use secure-backends. + # (see: https://github.com/kubernetes/ingress-nginx/issues/3416#issuecomment-438247948) + # For ingress-nginx versions 0.20.0 and later use backend-protocol. + {{- if (include "kiali-server.identity.cert_file" .) }} + nginx.ingress.kubernetes.io/secure-backends: "true" + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + {{- else }} + nginx.ingress.kubernetes.io/secure-backends: "false" + nginx.ingress.kubernetes.io/backend-protocol: "HTTP" + {{- end }} + {{- end }} +spec: + {{- if hasKey .Values.deployment.override_ingress_yaml "spec" }} + {{- toYaml .Values.deployment.override_ingress_yaml.spec | nindent 2 }} + {{- else }} + rules: + - http: + paths: + - path: {{ include "kiali-server.server.web_root" . }} + backend: + serviceName: {{ include "kiali-server.fullname" . 
}} + servicePort: {{ .Values.server.port }} + {{- end }} +... +{{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/oauth.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/oauth.yaml new file mode 100755 index 000000000..a178bb85e --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/oauth.yaml @@ -0,0 +1,17 @@ +{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} +{{- if .Values.kiali_route_url }} +--- +apiVersion: oauth.openshift.io/v1 +kind: OAuthClient +metadata: + name: {{ include "kiali-server.fullname" . }}-{{ .Release.Namespace }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +redirectURIs: +- {{ .Values.kiali_route_url }} +grantMethod: auto +allowAnyScope: true +... +{{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/psp.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/psp.yaml new file mode 100755 index 000000000..f891892cc --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/psp.yaml @@ -0,0 +1,67 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "kiali-server.fullname" . }}-psp + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "kiali-server.fullname" . }}-psp +subjects: + - kind: ServiceAccount + name: kiali +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "kiali-server.fullname" . }}-psp + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: + - policy + resourceNames: + - {{ include "kiali-server.fullname" . 
}}-psp + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "kiali-server.fullname" . }}-psp + namespace: {{ .Release.Namespace }} +spec: + allowPrivilegeEscalation: false + forbiddenSysctls: + - '*' + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + runAsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI + - persistentVolumeClaim +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/role-controlplane.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/role-controlplane.yaml new file mode 100755 index 000000000..a22c76756 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/role-controlplane.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "kiali-server.fullname" . }}-controlplane + namespace: {{ include "kiali-server.istio_namespace" . }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +rules: +- apiGroups: [""] + resources: + - secrets + verbs: + - list +... diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/role-viewer.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/role-viewer.yaml new file mode 100755 index 000000000..9fdd9fd1d --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/role-viewer.yaml @@ -0,0 +1,97 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "kiali-server.fullname" . }}-viewer + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +rules: +- apiGroups: [""] + resources: + - configmaps + - endpoints + - namespaces + - nodes + - pods + - pods/log + - pods/proxy + - replicationcontrollers + - services + verbs: + - get + - list + - watch +- apiGroups: [""] + resources: + - pods/portforward + verbs: + - create + - post +- apiGroups: ["extensions", "apps"] + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - watch +- apiGroups: ["batch"] + resources: + - cronjobs + - jobs + verbs: + - get + - list + - watch +- apiGroups: + - networking.istio.io + - security.istio.io + resources: ["*"] + verbs: + - get + - list + - watch +- apiGroups: ["apps.openshift.io"] + resources: + - deploymentconfigs + verbs: + - get + - list + - watch +- apiGroups: ["project.openshift.io"] + resources: + - projects + verbs: + - get +- apiGroups: ["route.openshift.io"] + resources: + - routes + verbs: + - get +- apiGroups: ["monitoring.kiali.io"] + resources: + - monitoringdashboards + verbs: + - get + - list +- apiGroups: ["iter8.tools"] + resources: + - experiments + verbs: + - get + - list +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: + - create +... diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/role.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/role.yaml new file mode 100755 index 000000000..8444bc753 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/role.yaml @@ -0,0 +1,108 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "kiali-server.fullname" . }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +rules: +- apiGroups: [""] + resources: + - configmaps + - endpoints + - namespaces + - nodes + - pods + - pods/log + - pods/proxy + - replicationcontrollers + - services + verbs: + - get + - list + - patch + - watch +- apiGroups: [""] + resources: + - pods/portforward + verbs: + - create + - post +- apiGroups: ["extensions", "apps"] + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - patch + - watch +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - watch +- apiGroups: ["batch"] + resources: + - cronjobs + - jobs + verbs: + - get + - list + - patch + - watch +- apiGroups: + - networking.istio.io + - security.istio.io + resources: ["*"] + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: ["apps.openshift.io"] + resources: + - deploymentconfigs + verbs: + - get + - list + - patch + - watch +- apiGroups: ["project.openshift.io"] + resources: + - projects + verbs: + - get +- apiGroups: ["route.openshift.io"] + resources: + - routes + verbs: + - get +- apiGroups: ["monitoring.kiali.io"] + resources: + - monitoringdashboards + verbs: + - get + - list +- apiGroups: ["iter8.tools"] + resources: + - experiments + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: + - create +... diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/rolebinding-controlplane.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/rolebinding-controlplane.yaml new file mode 100755 index 000000000..5a0015836 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/rolebinding-controlplane.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "kiali-server.fullname" . 
}}-controlplane + namespace: {{ include "kiali-server.istio_namespace" . }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "kiali-server.fullname" . }}-controlplane +subjects: +- kind: ServiceAccount + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} +... diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/rolebinding.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/rolebinding.yaml new file mode 100755 index 000000000..1eaabd65f --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/rolebinding.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "kiali-server.fullname" . }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + {{- if .Values.deployment.view_only_mode }} + name: {{ include "kiali-server.fullname" . }}-viewer + {{- else }} + name: {{ include "kiali-server.fullname" . }} + {{- end }} +subjects: +- kind: ServiceAccount + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} +... diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/route.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/route.yaml new file mode 100755 index 000000000..27940dc96 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/route.yaml @@ -0,0 +1,30 @@ +{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} +{{- if .Values.deployment.ingress_enabled }} +# As of OpenShift 4.5, need to use --disable-openapi-validation when installing via Helm +--- +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: {{ include "kiali-server.fullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + {{- if hasKey .Values.deployment.override_ingress_yaml.metadata "annotations" }} + annotations: + {{- toYaml .Values.deployment.override_ingress_yaml.metadata.annotations | nindent 4 }} + {{- end }} +spec: + {{- if hasKey .Values.deployment.override_ingress_yaml "spec" }} + {{- toYaml .Values.deployment.override_ingress_yaml.spec | nindent 2 }} + {{- else }} + tls: + termination: reencrypt + insecureEdgeTerminationPolicy: Redirect + to: + kind: Service + targetPort: {{ .Values.server.port }} + name: {{ include "kiali-server.fullname" . }} + {{- end }} +... +{{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/service.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/service.yaml new file mode 100755 index 000000000..9ccf4f388 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/service.yaml @@ -0,0 +1,47 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + annotations: + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + service.beta.openshift.io/serving-cert-secret-name: {{ include "kiali-server.fullname" . 
}}-cert-secret + {{- end }} + kiali.io/api-spec: https://kiali.io/api + kiali.io/api-type: rest + {{- if and (not (empty .Values.server.web_fqdn)) (not (empty .Values.server.web_schema)) }} + {{- if empty .Values.server.web_port }} + kiali.io/external-url: {{ .Values.server.web_schema }}://{{ .Values.server.web_fqdn }}{{ default "" .Values.server.web_root }} + {{- else }} + kiali.io/external-url: {{ .Values.server.web_schema }}://{{ .Values.server.web_fqdn }}:{{ .Values.server.web_port }}{{(default "" .Values.server.web_root) }} + {{- end }} + {{- end }} + {{- if .Values.deployment.service_annotations }} + {{- toYaml .Values.deployment.service_annotations | nindent 4 }} + {{- end }} +spec: + {{- if .Values.deployment.service_type }} + type: {{ .Values.deployment.service_type }} + {{- end }} + ports: + {{- if (include "kiali-server.identity.cert_file" .) }} + - name: tcp + {{- else }} + - name: http + {{- end }} + protocol: TCP + port: {{ .Values.server.port }} + {{- if .Values.server.metrics_enabled }} + - name: http-metrics + protocol: TCP + port: {{ .Values.server.metrics_port }} + {{- end }} + selector: + {{- include "kiali-server.selectorLabels" . | nindent 4 }} + {{- if .Values.deployment.additional_service_yaml }} + {{- toYaml .Values.deployment.additional_service_yaml | nindent 2 }} + {{- end }} +... diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/serviceaccount.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/serviceaccount.yaml new file mode 100755 index 000000000..9151b6f6a --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/serviceaccount.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +... 
diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/validate-install-crd.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/validate-install-crd.yaml new file mode 100755 index 000000000..b42eeb266 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/validate-install-crd.yaml @@ -0,0 +1,14 @@ +#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}} +# {{- $found := dict -}} +# {{- set $found "monitoring.kiali.io/v1alpha1/MonitoringDashboard" false -}} +# {{- range .Capabilities.APIVersions -}} +# {{- if hasKey $found (toString .) -}} +# {{- set $found (toString .) true -}} +# {{- end -}} +# {{- end -}} +# {{- range $_, $exists := $found -}} +# {{- if (eq $exists false) -}} +# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}} +# {{- end -}} +# {{- end -}} +#{{- end -}} \ No newline at end of file diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/web-root-configmap.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/web-root-configmap.yaml new file mode 100755 index 000000000..970d4e4f5 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/templates/web-root-configmap.yaml @@ -0,0 +1,12 @@ +{{- if .Values.web_root_override }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: kiali-console + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +data: + env.js: | + window.WEB_ROOT='/k8s/clusters/{{ .Values.global.cattle.clusterId }}/api/v1/namespaces/{{ .Release.Namespace }}/services/http:kiali:20001/proxy/kiali'; +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/values.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/values.yaml new file mode 100755 index 000000000..aada4e09a --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/kiali/values.yaml @@ -0,0 +1,93 @@ +nameOverride: "kiali" +fullnameOverride: "kiali" + +# This is required for "openshift" auth strategy. +# You have to know ahead of time what your Route URL will be because +# right now the helm chart can't figure this out at runtime (it would +# need to wait for the Kiali Route to be deployed and for OpenShift +# to start it up). If someone knows how to update this helm chart to +# do this, a PR would be welcome. +kiali_route_url: "" + +# rancher specific override that allows proxy access to kiali url +web_root_override: true + +# +# Settings that mimic the Kiali CR which are placed in the ConfigMap. +# Note that only those values used by the Helm Chart will be here. +# + +istio_namespace: "" # default is where Kiali is installed + +auth: + openid: {} + openshift: {} + strategy: "" + +deployment: + # This only limits what Kiali will attempt to see, but Kiali Service Account has permissions to see everything. 
+ # For more control over what the Kial Service Account can see, use the Kiali Operator + accessible_namespaces: + - "**" + additional_service_yaml: {} + affinity: + node: {} + pod: {} + pod_anti: {} + custom_dashboards: + excludes: [''] + includes: ['*'] + hpa: + api_version: "autoscaling/v2beta2" + spec: {} + repository: rancher/mirrored-kiali-kiali + image_pull_policy: "Always" + image_pull_secrets: [] + tag: v1.32.0 + ingress_enabled: true + logger: + log_format: "text" + log_level: "info" + time_field_format: "2006-01-02T15:04:05Z07:00" + sampler_rate: "1" + node_selector: {} + override_ingress_yaml: + metadata: {} + pod_annotations: {} + pod_labels: {} + priority_class_name: "" + replicas: 1 + resources: {} + secret_name: "kiali" + service_annotations: {} + service_type: "" + tolerations: [] + version_label: v1.32.0 + view_only_mode: false + +external_services: + custom_dashboards: + enabled: true + +identity: {} + #cert_file: + #private_key_file: + +login_token: + signing_key: "" + +server: + port: 20001 + metrics_enabled: true + metrics_port: 9090 + web_root: "" + +# Common settings used among istio subcharts. +global: + # Specify rancher clusterId of external tracing config + # https://github.com/istio/istio.io/issues/4146#issuecomment-493543032 + cattle: + systemDefaultRegistry: "" + clusterId: + rbac: + pspEnabled: false diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/.helmignore b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/.helmignore new file mode 100755 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/Chart.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/Chart.yaml new file mode 100755 index 000000000..6e368616d --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/Chart.yaml @@ -0,0 +1,12 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: istio-system + catalog.rancher.io/release-name: rancher-tracing +apiVersion: v1 +appVersion: 1.20.0 +description: A quick start Jaeger Tracing installation using the all-in-one demo. + This is not production qualified. Refer to https://www.jaegertracing.io/ for details. +name: tracing +version: 1.20.1 diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/README.md b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/README.md new file mode 100755 index 000000000..25534c628 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/README.md @@ -0,0 +1,5 @@ +# Jaeger + +A Rancher chart based on the Jaeger all-in-one quick installation option. This chart will allow you to trace and monitor distributed microservices. + +> **Note:** The basic all-in-one Jaeger installation which is not qualified for production. Use the [Jaeger Tracing](https://www.jaegertracing.io) documentation to determine which installation you will need for your production needs. 
diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/_affinity.tpl b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/_affinity.tpl new file mode 100755 index 000000000..bf6a9aee5 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/_affinity.tpl @@ -0,0 +1,92 @@ +{{/* affinity - https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ */}} +{{- define "nodeAffinity" }} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + {{- include "nodeAffinityRequiredDuringScheduling" . }} + preferredDuringSchedulingIgnoredDuringExecution: + {{- include "nodeAffinityPreferredDuringScheduling" . }} +{{- end }} + +{{- define "nodeAffinityRequiredDuringScheduling" }} + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + {{- range $key, $val := .Values.global.arch }} + {{- if gt ($val | int) 0 }} + - {{ $key | quote }} + {{- end }} + {{- end }} + {{- $nodeSelector := default .Values.global.defaultNodeSelector .Values.nodeSelector -}} + {{- range $key, $val := $nodeSelector }} + - key: {{ $key }} + operator: In + values: + - {{ $val | quote }} + {{- end }} +{{- end }} + +{{- define "nodeAffinityPreferredDuringScheduling" }} + {{- range $key, $val := .Values.global.arch }} + {{- if gt ($val | int) 0 }} + - weight: {{ $val | int }} + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - {{ $key | quote }} + {{- end }} + {{- end }} +{{- end }} + +{{- define "podAntiAffinity" }} +{{- if or .Values.podAntiAffinityLabelSelector .Values.podAntiAffinityTermLabelSelector}} + podAntiAffinity: + {{- if .Values.podAntiAffinityLabelSelector }} + requiredDuringSchedulingIgnoredDuringExecution: + {{- include "podAntiAffinityRequiredDuringScheduling" . 
}} + {{- end }} + {{- if or .Values.podAntiAffinityTermLabelSelector}} + preferredDuringSchedulingIgnoredDuringExecution: + {{- include "podAntiAffinityPreferredDuringScheduling" . }} + {{- end }} +{{- end }} +{{- end }} + +{{- define "podAntiAffinityRequiredDuringScheduling" }} + {{- range $index, $item := .Values.podAntiAffinityLabelSelector }} + - labelSelector: + matchExpressions: + - key: {{ $item.key }} + operator: {{ $item.operator }} + {{- if $item.values }} + values: + {{- $vals := split "," $item.values }} + {{- range $i, $v := $vals }} + - {{ $v | quote }} + {{- end }} + {{- end }} + topologyKey: {{ $item.topologyKey }} + {{- end }} +{{- end }} + +{{- define "podAntiAffinityPreferredDuringScheduling" }} + {{- range $index, $item := .Values.podAntiAffinityTermLabelSelector }} + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: {{ $item.key }} + operator: {{ $item.operator }} + {{- if $item.values }} + values: + {{- $vals := split "," $item.values }} + {{- range $i, $v := $vals }} + - {{ $v | quote }} + {{- end }} + {{- end }} + topologyKey: {{ $item.topologyKey }} + weight: 100 + {{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/_helpers.tpl b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/_helpers.tpl new file mode 100755 index 000000000..56cfa7335 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "tracing.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "tracing.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/deployment.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/deployment.yaml new file mode 100755 index 000000000..25bb67fd3 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/deployment.yaml @@ -0,0 +1,86 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "tracing.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + selector: + matchLabels: + app: {{ .Values.provider }} + template: + metadata: + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} + annotations: + sidecar.istio.io/inject: "false" + prometheus.io/scrape: "true" + prometheus.io/port: "14269" +{{- if .Values.jaeger.podAnnotations }} +{{ toYaml .Values.jaeger.podAnnotations | indent 8 }} +{{- end }} + spec: + containers: + - name: jaeger + image: "{{ template "system_default_registry" . 
}}{{ .Values.jaeger.repository }}:{{ .Values.jaeger.tag }}" + imagePullPolicy: {{ .Values.global.imagePullPolicy }} + env: + {{- if eq .Values.jaeger.spanStorageType "badger" }} + - name: BADGER_EPHEMERAL + value: "false" + - name: SPAN_STORAGE_TYPE + value: "badger" + - name: BADGER_DIRECTORY_VALUE + value: "/badger/data" + - name: BADGER_DIRECTORY_KEY + value: "/badger/key" + {{- end }} + - name: COLLECTOR_ZIPKIN_HTTP_PORT + value: "9411" + - name: MEMORY_MAX_TRACES + value: "{{ .Values.jaeger.memory.max_traces }}" + - name: QUERY_BASE_PATH + value: {{ if .Values.contextPath }} {{ .Values.contextPath }} {{ else }} /{{ .Values.provider }} {{ end }} + livenessProbe: + httpGet: + path: / + port: 14269 + readinessProbe: + httpGet: + path: / + port: 14269 +{{- if eq .Values.jaeger.spanStorageType "badger" }} + volumeMounts: + - name: data + mountPath: /badger +{{- end }} + resources: +{{- if .Values.jaeger.resources }} +{{ toYaml .Values.jaeger.resources | indent 12 }} +{{- else }} +{{ toYaml .Values.global.defaultResources | indent 12 }} +{{- end }} + affinity: + {{- include "nodeAffinity" . | indent 6 }} + {{- include "podAntiAffinity" . | indent 6 }} + {{- if .Values.global.rbac.pspEnabled }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + serviceAccountName: {{ include "tracing.fullname" . 
}} + {{- end }} +{{- if eq .Values.jaeger.spanStorageType "badger" }} + volumes: + - name: data +{{- if .Values.jaeger.persistentVolumeClaim.enabled }} + persistentVolumeClaim: + claimName: istio-jaeger-pvc +{{- else }} + emptyDir: {} +{{- end }} +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/psp.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/psp.yaml new file mode 100755 index 000000000..44b230492 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/psp.yaml @@ -0,0 +1,86 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "tracing.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "tracing.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "tracing.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ include "tracing.fullname" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "tracing.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +rules: +- apiGroups: + - policy + resourceNames: + - {{ include "tracing.fullname" . }} + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "tracing.fullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + forbiddenSysctls: + - '*' + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + runAsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - emptyDir + - secret + - persistentVolumeClaim +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/pvc.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/pvc.yaml new file mode 100755 index 000000000..9b4c55e4f --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/pvc.yaml @@ -0,0 +1,16 @@ +{{- if .Values.jaeger.persistentVolumeClaim.enabled }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: istio-jaeger-pvc + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} +spec: + storageClassName: {{ .Values.jaeger.storageClassName }} + accessModes: + - {{ .Values.jaeger.accessMode }} + resources: + requests: + storage: {{.Values.jaeger.persistentVolumeClaim.storage }} +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/service.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/service.yaml new file mode 100755 index 000000000..4210a9b5f --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/templates/service.yaml @@ -0,0 +1,63 @@ +apiVersion: v1 +kind: Service +metadata: + name: tracing + namespace: {{ .Release.Namespace }} + annotations: + {{- range $key, $val := .Values.service.annotations }} + {{ $key }}: {{ $val | quote }} + {{- end }} + labels: + app: {{ .Values.provider 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + type: {{ .Values.service.type }} + ports: + - name: {{ .Values.service.name }} + port: {{ .Values.service.externalPort }} + protocol: TCP + targetPort: 16686 + selector: + app: {{ .Values.provider }} +--- +# Jaeger implements the Zipkin API. To support swapping out the tracing backend, we use a Service named Zipkin. +apiVersion: v1 +kind: Service +metadata: + name: zipkin + namespace: {{ .Release.Namespace }} + labels: + name: zipkin + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + ports: + - name: {{ .Values.service.name }} + port: {{ .Values.zipkin.queryPort }} + targetPort: {{ .Values.zipkin.queryPort }} + selector: + app: {{ .Values.provider }} +--- +apiVersion: v1 +kind: Service +metadata: + name: jaeger-collector + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Values.provider }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + type: ClusterIP + ports: + - name: jaeger-collector-http + port: 14268 + targetPort: 14268 + protocol: TCP + - name: jaeger-collector-grpc + port: 14250 + targetPort: 14250 + protocol: TCP + selector: + app: {{ .Values.provider }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/values.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/values.yaml new file mode 100755 index 000000000..18ff81c3c --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/charts/tracing/values.yaml @@ -0,0 +1,44 @@ +provider: jaeger +contextPath: "" +nodeSelector: {} +podAntiAffinityLabelSelector: [] +podAntiAffinityTermLabelSelector: [] +nameOverride: "" +fullnameOverride: "" + +global: + cattle: + systemDefaultRegistry: "" + defaultResources: {} + imagePullPolicy: IfNotPresent + imagePullSecrets: [] + arch: + amd64: 2 + s390x: 2 + ppc64le: 2 + defaultNodeSelector: {} + rbac: + pspEnabled: false + +jaeger: + repository: rancher/mirrored-jaegertracing-all-in-one + 
tag: 1.20.0 + # spanStorageType value can be "memory" and "badger" for all-in-one image + spanStorageType: badger + resources: + requests: + cpu: 10m + persistentVolumeClaim: + enabled: false + storage: 5Gi + storageClassName: "" + accessMode: ReadWriteMany + memory: + max_traces: 50000 +zipkin: + queryPort: 9411 +service: + annotations: {} + name: http-query + type: ClusterIP + externalPort: 16686 diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/configs/istio-base.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/configs/istio-base.yaml new file mode 100755 index 000000000..c5fa6f5f0 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/configs/istio-base.yaml @@ -0,0 +1,82 @@ +apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +spec: + components: + base: + enabled: {{ .Values.base.enabled }} + cni: + enabled: {{ .Values.cni.enabled }} + egressGateways: + - enabled: {{ .Values.egressGateways.enabled }} + name: istio-egressgateway + ingressGateways: + - enabled: {{ .Values.ingressGateways.enabled }} + name: istio-ingressgateway + k8s: + service: + ports: + - name: status-port + port: 15021 + targetPort: 15021 + - name: http2 + port: 80 + targetPort: 8080 + nodePort: 31380 + - name: https + port: 443 + targetPort: 8443 + nodePort: 31390 + - name: tcp + port: 31400 + targetPort: 31400 + nodePort: 31400 + - name: tls + port: 15443 + targetPort: 15443 + istiodRemote: + enabled: {{ .Values.istiodRemote.enabled }} + pilot: + enabled: {{ .Values.pilot.enabled }} + hub: {{ .Values.systemDefaultRegistry | default "docker.io" }} + profile: default + tag: {{ .Values.tag }} + revision: {{ .Values.revision }} + meshConfig: + defaultConfig: + proxyMetadata: + {{- if .Values.dns.enabled }} + ISTIO_META_DNS_CAPTURE: "true" + {{- end }} + values: + gateways: + istio-egressgateway: + name: istio-egressgateway + type: {{ .Values.egressGateways.type }} + istio-ingressgateway: + name: istio-ingressgateway + type: {{ .Values.ingressGateways.type }} + 
global: + istioNamespace: {{ template "istio.namespace" . }} + proxy: + image: {{ template "system_default_registry" . }}{{ .Values.global.proxy.repository }}:{{ .Values.global.proxy.tag }} + proxy_init: + image: {{ template "system_default_registry" . }}{{ .Values.global.proxy_init.repository }}:{{ .Values.global.proxy_init.tag }} + {{- if .Values.global.defaultPodDisruptionBudget.enabled }} + defaultPodDisruptionBudget: + enabled: {{ .Values.global.defaultPodDisruptionBudget.enabled }} + {{- end }} + {{- if .Values.pilot.enabled }} + pilot: + image: {{ template "system_default_registry" . }}{{ .Values.pilot.repository }}:{{ .Values.pilot.tag }} + {{- end }} + telemetry: + enabled: {{ .Values.telemetry.enabled }} + v2: + enabled: {{ .Values.telemetry.v2.enabled }} + {{- if .Values.cni.enabled }} + cni: + image: {{ template "system_default_registry" . }}{{ .Values.cni.repository }}:{{ .Values.cni.tag }} + excludeNamespaces: + {{- toYaml .Values.cni.excludeNamespaces | nindent 8 }} + logLevel: {{ .Values.cni.logLevel }} + {{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/requirements.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/requirements.yaml new file mode 100755 index 000000000..b60745780 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/requirements.yaml @@ -0,0 +1,17 @@ +dependencies: +- name: kiali + version: "" + repository: file://./charts/kiali + condition: kiali.enabled + tags: [] + enabled: false + importvalues: [] + alias: "" +- name: tracing + version: "" + repository: file://./charts/tracing + condition: tracing.enabled + tags: [] + enabled: false + importvalues: [] + alias: "" diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/samples/overlay-example.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/samples/overlay-example.yaml new file mode 100755 index 000000000..5cf3cf3b0 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/samples/overlay-example.yaml @@ -0,0 +1,37 @@ 
+apiVersion: install.istio.io/v1alpha1 +kind: IstioOperator +spec: + components: + ingressGateways: + - enabled: true + name: ilb-gateway + namespace: user-ingressgateway-ns + k8s: + resources: + requests: + cpu: 200m + service: + ports: + - name: tcp-citadel-grpc-tls + port: 8060 + targetPort: 8060 + - name: tcp-dns + port: 5353 + serviceAnnotations: + cloud.google.com/load-balancer-type: internal + - enabled: true + name: other-gateway + namespace: cattle-istio-system + k8s: + resources: + requests: + cpu: 200m + service: + ports: + - name: tcp-citadel-grpc-tls + port: 8060 + targetPort: 8060 + - name: tcp-dns + port: 5353 + serviceAnnotations: + cloud.google.com/load-balancer-type: internal diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/_helpers.tpl b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/_helpers.tpl new file mode 100755 index 000000000..3f7af953a --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/_helpers.tpl @@ -0,0 +1,12 @@ +{{/* Ensure namespace is set the same everywhere */}} +{{- define "istio.namespace" -}} + {{- .Release.Namespace | default "istio-system" -}} +{{- end -}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/admin-role.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/admin-role.yaml new file mode 100755 index 000000000..ad1313c4f --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/admin-role.yaml @@ -0,0 +1,43 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + name: istio-admin + namespace: {{ template "istio.namespace" . 
}} +rules: + - apiGroups: + - config.istio.io + resources: + - adapters + - attributemanifests + - handlers + - httpapispecbindings + - httpapispecs + - instances + - quotaspecbindings + - quotaspecs + - rules + - templates + verbs: ["get", "watch", "list"] + - apiGroups: + - networking.istio.io + resources: + - destinationrules + - envoyfilters + - gateways + - serviceentries + - sidecars + - virtualservices + - workloadentries + verbs: + - '*' + - apiGroups: + - security.istio.io + resources: + - authorizationpolicies + - peerauthentications + - requestauthentications + verbs: + - '*' diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/base-config-map.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/base-config-map.yaml new file mode 100755 index 000000000..5323917bc --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/base-config-map.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-installer-base + namespace: {{ template "istio.namespace" . }} +data: +{{ tpl (.Files.Glob "configs/*").AsConfig . 
| indent 2 }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/clusterrole.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/clusterrole.yaml new file mode 100755 index 000000000..a93b3df95 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/clusterrole.yaml @@ -0,0 +1,120 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: istio-installer +rules: +# istio groups +- apiGroups: + - authentication.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - config.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - install.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - networking.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - rbac.istio.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - security.istio.io + resources: + - '*' + verbs: + - '*' +# k8s groups +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - '*' +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions.apiextensions.k8s.io + - customresourcedefinitions + verbs: + - '*' +- apiGroups: + - apps + - extensions + resources: + - daemonsets + - deployments + - deployments/finalizers + - ingresses + - replicasets + - statefulsets + verbs: + - '*' +- apiGroups: + - autoscaling + resources: + - horizontalpodautoscalers + verbs: + - '*' +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - get + - create +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - '*' +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - roles + - rolebindings + verbs: + - '*' +- apiGroups: + - "" + resources: + - configmaps + - endpoints + - events + - namespaces + - pods + - pods/exec + - persistentvolumeclaims + - secrets + - services + - serviceaccounts + verbs: + - '*' 
+- apiGroups: + - policy + resourceNames: + - istio-installer + resources: + - podsecuritypolicies + verbs: + - use diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/clusterrolebinding.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/clusterrolebinding.yaml new file mode 100755 index 000000000..9d74a0434 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: istio-installer +subjects: +- kind: ServiceAccount + name: istio-installer + namespace: {{ template "istio.namespace" . }} +roleRef: + kind: ClusterRole + name: istio-installer + apiGroup: rbac.authorization.k8s.io diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/edit-role.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/edit-role.yaml new file mode 100755 index 000000000..d1059d58d --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/edit-role.yaml @@ -0,0 +1,43 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + namespace: {{ template "istio.namespace" . 
}} + name: istio-edit +rules: + - apiGroups: + - config.istio.io + resources: + - adapters + - attributemanifests + - handlers + - httpapispecbindings + - httpapispecs + - instances + - quotaspecbindings + - quotaspecs + - rules + - templates + verbs: ["get", "watch", "list"] + - apiGroups: + - networking.istio.io + resources: + - destinationrules + - envoyfilters + - gateways + - serviceentries + - sidecars + - virtualservices + - workloadentries + verbs: + - '*' + - apiGroups: + - security.istio.io + resources: + - authorizationpolicies + - peerauthentications + - requestauthentications + verbs: + - '*' diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-cni-psp.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-cni-psp.yaml new file mode 100755 index 000000000..5b94c8503 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-cni-psp.yaml @@ -0,0 +1,51 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: psp-istio-cni + namespace: {{ template "istio.namespace" . }} +spec: + allowPrivilegeEscalation: true + fsGroup: + rule: RunAsAny + hostNetwork: true + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - secret + - configMap + - emptyDir + - hostPath +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: psp-istio-cni + namespace: {{ template "istio.namespace" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: psp-istio-cni +subjects: + - kind: ServiceAccount + name: istio-cni +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: psp-istio-cni + namespace: {{ template "istio.namespace" . 
}} +rules: +- apiGroups: + - policy + resourceNames: + - psp-istio-cni + resources: + - podsecuritypolicies + verbs: + - use +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-install-job.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-install-job.yaml new file mode 100755 index 000000000..9a13f5698 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-install-job.yaml @@ -0,0 +1,50 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: istioctl-installer + namespace: {{ template "istio.namespace" . }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded +spec: + backoffLimit: 1 + template: + spec: + containers: + - name: istioctl-installer + image: {{ template "system_default_registry" . }}{{ .Values.installer.repository }}:{{ .Values.installer.tag }} + env: + - name: RELEASE_NAME + value: {{ .Release.Name }} + - name: ISTIO_NAMESPACE + value: {{ template "istio.namespace" . 
}} + - name: FORCE_INSTALL + value: {{ .Values.forceInstall | default "false" | quote }} + command: ["/bin/sh","-c"] + args: ["/usr/local/app/scripts/run.sh"] + volumeMounts: + - name: config-volume + mountPath: /app/istio-base.yaml + subPath: istio-base.yaml + {{- if .Values.overlayFile }} + - name: overlay-volume + mountPath: /app/overlay-config.yaml + subPath: overlay-config.yaml + {{- end }} + volumes: + - name: config-volume + configMap: + name: istio-installer-base + {{- if .Values.overlayFile }} + - name: overlay-volume + configMap: + name: istio-installer-overlay + {{- end }} + serviceAccountName: istio-installer + {{- if .Values.global.rbac.pspEnabled }} + securityContext: + runAsUser: 101 + runAsGroup: 101 + {{- end }} + restartPolicy: Never diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-install-psp.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-install-psp.yaml new file mode 100755 index 000000000..f0b5ee565 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-install-psp.yaml @@ -0,0 +1,30 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: istio-installer + namespace: {{ template "istio.namespace" . 
}} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'configMap' + - 'secret' +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-psp.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-psp.yaml new file mode 100755 index 000000000..b3758b74f --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-psp.yaml @@ -0,0 +1,81 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: istio-psp + namespace: {{ template "istio.namespace" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: istio-psp +subjects: + - kind: ServiceAccount + name: istio-egressgateway-service-account + - kind: ServiceAccount + name: istio-ingressgateway-service-account + - kind: ServiceAccount + name: istio-mixer-service-account + - kind: ServiceAccount + name: istio-operator-authproxy + - kind: ServiceAccount + name: istiod-service-account + - kind: ServiceAccount + name: istio-sidecar-injector-service-account + - kind: ServiceAccount + name: istiocoredns-service-account + - kind: ServiceAccount + name: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: istio-psp + namespace: {{ template "istio.namespace" . }} +rules: +- apiGroups: + - policy + resourceNames: + - istio-psp + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: istio-psp + namespace: {{ template "istio.namespace" . 
}} +spec: + allowPrivilegeEscalation: false + forbiddenSysctls: + - '*' + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + runAsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI + - persistentVolumeClaim +{{- end }} \ No newline at end of file diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-uninstall-job.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-uninstall-job.yaml new file mode 100755 index 000000000..a7f156325 --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/istio-uninstall-job.yaml @@ -0,0 +1,45 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: istioctl-uninstaller + namespace: {{ template "istio.namespace" . }} + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-weight": "-5" + "helm.sh/hook-delete-policy": hook-succeeded +spec: + template: + spec: + containers: + - name: istioctl-uninstaller + image: {{ template "system_default_registry" . }}{{ .Values.installer.repository }}:{{ .Values.installer.tag }} + env: + - name: RELEASE_NAME + value: {{ .Release.Name }} + - name: ISTIO_NAMESPACE + value: {{ template "istio.namespace" . 
}} + command: ["/bin/sh","-c"] + args: ["/usr/local/app/scripts/uninstall_istio_system.sh"] + volumeMounts: + - name: config-volume + mountPath: /app/istio-base.yaml + subPath: istio-base.yaml + {{- if .Values.overlayFile }} + - name: overlay-volume + mountPath: /app/overlay-config.yaml + subPath: overlay-config.yaml + {{ end }} + volumes: + - name: config-volume + configMap: + name: istio-installer-base + {{- if .Values.overlayFile }} + - name: overlay-volume + configMap: + name: istio-installer-overlay + {{ end }} + serviceAccountName: istio-installer + securityContext: + runAsUser: 101 + runAsGroup: 101 + restartPolicy: OnFailure diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/overlay-config-map.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/overlay-config-map.yaml new file mode 100755 index 000000000..287d26b2c --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/overlay-config-map.yaml @@ -0,0 +1,9 @@ +{{- if .Values.overlayFile }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: istio-installer-overlay + namespace: {{ template "istio.namespace" . }} +data: + overlay-config.yaml: {{ toYaml .Values.overlayFile | indent 2 }} +{{- end }} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/service-monitors.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/service-monitors.yaml new file mode 100755 index 000000000..c3d60c4fc --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/service-monitors.yaml @@ -0,0 +1,51 @@ +{{- if .Values.kiali.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: envoy-stats-monitor + namespace: {{ template "istio.namespace" . 
}} + labels: + monitoring: istio-proxies +spec: + selector: + matchExpressions: + - {key: istio-prometheus-ignore, operator: DoesNotExist} + namespaceSelector: + any: true + jobLabel: envoy-stats + endpoints: + - path: /stats/prometheus + targetPort: 15090 + interval: 15s + relabelings: + - sourceLabels: [__meta_kubernetes_pod_container_port_name] + action: keep + regex: '.*-envoy-prom' + - action: labeldrop + regex: "__meta_kubernetes_pod_label_(.+)" + - sourceLabels: [__meta_kubernetes_namespace] + action: replace + targetLabel: namespace + - sourceLabels: [__meta_kubernetes_pod_name] + action: replace + targetLabel: pod_name +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: istio-component-monitor + namespace: {{ template "istio.namespace" . }} + labels: + monitoring: istio-components +spec: + jobLabel: istio + targetLabels: [app] + selector: + matchExpressions: + - {key: istio, operator: In, values: [pilot]} + namespaceSelector: + any: true + endpoints: + - port: http-monitoring + interval: 15s +{{- end -}} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/serviceaccount.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/serviceaccount.yaml new file mode 100755 index 000000000..82b6cbb7e --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/serviceaccount.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: istio-installer + namespace: {{ template "istio.namespace" . 
}} diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/view-role.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/view-role.yaml new file mode 100755 index 000000000..5947d3eba --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/templates/view-role.yaml @@ -0,0 +1,41 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + namespace: {{ template "istio.namespace" . }} + name: istio-view +rules: + - apiGroups: + - config.istio.io + resources: + - adapters + - attributemanifests + - handlers + - httpapispecbindings + - httpapispecs + - instances + - quotaspecbindings + - quotaspecs + - rules + - templates + verbs: ["get", "watch", "list"] + - apiGroups: + - networking.istio.io + resources: + - destinationrules + - envoyfilters + - gateways + - serviceentries + - sidecars + - virtualservices + - workloadentries + verbs: ["get", "watch", "list"] + - apiGroups: + - security.istio.io + resources: + - authorizationpolicies + - peerauthentications + - requestauthentications + verbs: ["get", "watch", "list"] diff --git a/charts/rancher-istio-1.9/rancher-istio/1.9.500/values.yaml b/charts/rancher-istio-1.9/rancher-istio/1.9.500/values.yaml new file mode 100755 index 000000000..e679a6fbb --- /dev/null +++ b/charts/rancher-istio-1.9/rancher-istio/1.9.500/values.yaml @@ -0,0 +1,85 @@ +overlayFile: "" +tag: 1.9.5 +##Setting forceInstall: true will remove the check for istio version < 1.6.x and will not analyze your install cluster prior to install +forceInstall: false + +installer: + repository: rancher/istio-installer + tag: 1.9.5-rancher1 + +##Native support for dns added in 1.8 +dns: + enabled: false + +base: + enabled: true + +cni: + enabled: false + repository: rancher/mirrored-istio-install-cni + tag: 1.9.5 + logLevel: info + excludeNamespaces: + - istio-system + - kube-system + +egressGateways: + enabled: false + type: NodePort + 
+ingressGateways: + enabled: true + type: NodePort + +istiodRemote: + enabled: false + +pilot: + enabled: true + repository: rancher/mirrored-istio-pilot + tag: 1.9.5 + +telemetry: + enabled: true + v2: + enabled: true + +global: + cattle: + systemDefaultRegistry: "" + proxy: + repository: rancher/mirrored-istio-proxyv2 + tag: 1.9.5 + proxy_init: + repository: rancher/mirrored-istio-proxyv2 + tag: 1.9.5 + defaultPodDisruptionBudget: + enabled: true + rbac: + pspEnabled: false + +# Kiali subchart from rancher-kiali-server +kiali: + enabled: true + auth: + strategy: anonymous + deployment: + ingress_enabled: false + repository: rancher/mirrored-kiali-kiali + tag: v1.32.0 + external_services: + prometheus: + custom_metrics_url: "http://rancher-monitoring-prometheus.cattle-monitoring-system.svc:9090" + url: "http://rancher-monitoring-prometheus.cattle-monitoring-system.svc:9090" + tracing: + in_cluster_url: "http://tracing.istio-system.svc:16686/jaeger" + grafana: + in_cluster_url: "http://rancher-monitoring-grafana.cattle-monitoring-system.svc:80" + url: "http://rancher-monitoring-grafana.cattle-monitoring-system.svc:80" + +tracing: + enabled: false + contextPath: "/jaeger" + jaeger: + repository: rancher/mirrored-jaegertracing-all-in-one + tag: 1.20.0 diff --git a/charts/rancher-kiali-server/rancher-kiali-server-crd/1.32.100/Chart.yaml b/charts/rancher-kiali-server/rancher-kiali-server-crd/1.32.100/Chart.yaml new file mode 100755 index 000000000..2ec92b491 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server-crd/1.32.100/Chart.yaml @@ -0,0 +1,7 @@ +annotations: + catalog.cattle.io/hidden: "true" +apiVersion: v2 +description: Installs the CRDs for rancher-kiali-server. 
+name: rancher-kiali-server-crd +type: application +version: 1.32.100 diff --git a/charts/rancher-kiali-server/rancher-kiali-server-crd/1.32.100/README.md b/charts/rancher-kiali-server/rancher-kiali-server-crd/1.32.100/README.md new file mode 100755 index 000000000..3847c18a1 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server-crd/1.32.100/README.md @@ -0,0 +1,2 @@ +# rancher-kiali-server-crd +A Rancher chart that installs the CRDs used by rancher-kiali-server. diff --git a/charts/rancher-kiali-server/rancher-kiali-server-crd/1.32.100/templates/crds.yaml b/charts/rancher-kiali-server/rancher-kiali-server-crd/1.32.100/templates/crds.yaml new file mode 100755 index 000000000..ae7c49349 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server-crd/1.32.100/templates/crds.yaml @@ -0,0 +1,22 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: monitoringdashboards.monitoring.kiali.io +spec: + group: monitoring.kiali.io + names: + kind: MonitoringDashboard + listKind: MonitoringDashboardList + plural: monitoringdashboards + singular: monitoringdashboard + scope: Namespaced + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true +... 
diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/Chart.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/Chart.yaml new file mode 100755 index 000000000..0216620e2 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/Chart.yaml @@ -0,0 +1,31 @@ +annotations: + catalog.cattle.io/auto-install: rancher-kiali-server-crd=match + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.cattle.io/provides-gvr: monitoringdashboards.monitoring.kiali.io/v1alpha1 + catalog.cattle.io/requires-gvr: monitoring.coreos.com.prometheus/v1 + catalog.rancher.io/namespace: cattle-istio-system + catalog.rancher.io/release-name: rancher-kiali-server +apiVersion: v2 +appVersion: v1.32.0 +description: Kiali is an open source project for service mesh observability, refer + to https://www.kiali.io for details. This is installed as sub-chart with customized + values in Rancher's Istio. +home: https://github.com/kiali/kiali +icon: https://raw.githubusercontent.com/kiali/kiali.io/master/themes/kiali/static/img/kiali_logo_masthead.png +keywords: +- istio +- kiali +- networking +- infrastructure +maintainers: +- email: kiali-users@googlegroups.com + name: Kiali + url: https://kiali.io +name: rancher-kiali-server +sources: +- https://github.com/kiali/kiali +- https://github.com/kiali/kiali-ui +- https://github.com/kiali/kiali-operator +- https://github.com/kiali/helm-charts +version: 1.32.100 diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/NOTES.txt b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/NOTES.txt new file mode 100755 index 000000000..751019401 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/NOTES.txt @@ -0,0 +1,5 @@ +Welcome to Kiali! For more details on Kiali, see: https://kiali.io + +The Kiali Server [{{ .Chart.AppVersion }}] has been installed in namespace [{{ .Release.Namespace }}]. It will be ready soon. 
+ +(Helm: Chart=[{{ .Chart.Name }}], Release=[{{ .Release.Name }}], Version=[{{ .Chart.Version }}]) diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/_helpers.tpl b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/_helpers.tpl new file mode 100755 index 000000000..dd33bbe48 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/_helpers.tpl @@ -0,0 +1,192 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "kiali-server.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "kiali-server.fullname" -}} +{{- if .Values.fullnameOverride }} + {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} + {{- $name := default .Chart.Name .Values.nameOverride }} + {{- printf "%s" $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "kiali-server.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Identifies the log_level with the old verbose_mode and the new log_level considered. +*/}} +{{- define "kiali-server.logLevel" -}} +{{- if .Values.deployment.verbose_mode -}} +{{- .Values.deployment.verbose_mode -}} +{{- else -}} +{{- .Values.deployment.logger.log_level -}} +{{- end -}} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "kiali-server.labels" -}} +helm.sh/chart: {{ include "kiali-server.chart" . }} +app: {{ include "kiali-server.name" . }} +{{ include "kiali-server.selectorLabels" . 
}} +version: {{ .Values.deployment.version_label | default .Chart.AppVersion | quote }} +app.kubernetes.io/version: {{ .Values.deployment.version_label | default .Chart.AppVersion | quote }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/part-of: "kiali" +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "kiali-server.selectorLabels" -}} +app.kubernetes.io/name: {{ include "kiali-server.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Used to determine if a custom dashboard (defined in .Template.Name) should be deployed. +*/}} +{{- define "kiali-server.isDashboardEnabled" -}} +{{- if .Values.external_services.custom_dashboards.enabled }} + {{- $includere := "" }} + {{- range $_, $s := .Values.deployment.custom_dashboards.includes }} + {{- if $s }} + {{- if $includere }} + {{- $includere = printf "%s|^%s$" $includere ($s | replace "*" ".*" | replace "?" ".") }} + {{- else }} + {{- $includere = printf "^%s$" ($s | replace "*" ".*" | replace "?" ".") }} + {{- end }} + {{- end }} + {{- end }} + {{- $excludere := "" }} + {{- range $_, $s := .Values.deployment.custom_dashboards.excludes }} + {{- if $s }} + {{- if $excludere }} + {{- $excludere = printf "%s|^%s$" $excludere ($s | replace "*" ".*" | replace "?" ".") }} + {{- else }} + {{- $excludere = printf "^%s$" ($s | replace "*" ".*" | replace "?" ".") }} + {{- end }} + {{- end }} + {{- end }} + {{- if (and (mustRegexMatch (default "no-matches" $includere) (base .Template.Name)) (not (mustRegexMatch (default "no-matches" $excludere) (base .Template.Name)))) }} + {{- print "enabled" }} + {{- else }} + {{- print "" }} + {{- end }} +{{- else }} + {{- print "" }} +{{- end }} +{{- end }} + +{{/* +Determine the default login token signing key. 
+*/}} +{{- define "kiali-server.login_token.signing_key" -}} +{{- if .Values.login_token.signing_key }} + {{- .Values.login_token.signing_key }} +{{- else }} + {{- randAlphaNum 16 }} +{{- end }} +{{- end }} + +{{/* +Determine the default web root. +*/}} +{{- define "kiali-server.server.web_root" -}} +{{- if .Values.server.web_root }} + {{- .Values.server.web_root | trimSuffix "/" }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- "/" }} + {{- else }} + {{- "/kiali" }} + {{- end }} +{{- end }} +{{- end }} + +{{/* +Determine the default identity cert file. There is no default if on k8s; only on OpenShift. +*/}} +{{- define "kiali-server.identity.cert_file" -}} +{{- if hasKey .Values.identity "cert_file" }} + {{- .Values.identity.cert_file }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- "/kiali-cert/tls.crt" }} + {{- else }} + {{- "" }} + {{- end }} +{{- end }} +{{- end }} + +{{/* +Determine the default identity private key file. There is no default if on k8s; only on OpenShift. +*/}} +{{- define "kiali-server.identity.private_key_file" -}} +{{- if hasKey .Values.identity "private_key_file" }} + {{- .Values.identity.private_key_file }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- "/kiali-cert/tls.key" }} + {{- else }} + {{- "" }} + {{- end }} +{{- end }} +{{- end }} + +{{/* +Determine the istio namespace - default is where Kiali is installed. +*/}} +{{- define "kiali-server.istio_namespace" -}} +{{- if .Values.istio_namespace }} + {{- .Values.istio_namespace }} +{{- else }} + {{- .Release.Namespace }} +{{- end }} +{{- end }} + +{{/* +Determine the auth strategy to use - default is "token" on Kubernetes and "openshift" on OpenShift. 
+*/}} +{{- define "kiali-server.auth.strategy" -}} +{{- if .Values.auth.strategy }} + {{- if (and (eq .Values.auth.strategy "openshift") (not .Values.kiali_route_url)) }} + {{- fail "You did not define what the Kiali Route URL will be (--set kiali_route_url=...). Without this set, the openshift auth strategy will not work. Either set that or use a different auth strategy via the --set auth.strategy=... option." }} + {{- end }} + {{- .Values.auth.strategy }} +{{- else }} + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + {{- if not .Values.kiali_route_url }} + {{- fail "You did not define what the Kiali Route URL will be (--set kiali_route_url=...). Without this set, the openshift auth strategy will not work. Either set that or explicitly indicate another auth strategy you want via the --set auth.strategy=... option." }} + {{- end }} + {{- "openshift" }} + {{- else }} + {{- "token" }} + {{- end }} +{{- end }} +{{- end }} + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/cabundle.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/cabundle.yaml new file mode 100755 index 000000000..7462b95a7 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/cabundle.yaml @@ -0,0 +1,13 @@ +{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kiali-server.fullname" . }}-cabundle + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + annotations: + service.beta.openshift.io/inject-cabundle: "true" +... 
+{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/configmap.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/configmap.yaml new file mode 100755 index 000000000..b1bf53173 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/configmap.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +data: + config.yaml: | + {{- /* Most of .Values is simply the ConfigMap - strip out the keys that are not part of the ConfigMap */}} + {{- $cm := omit .Values "nameOverride" "fullnameOverride" "kiali_route_url" }} + {{- /* The helm chart defines namespace for us, but pass it to the ConfigMap in case the server needs it */}} + {{- $_ := set $cm.deployment "namespace" .Release.Namespace }} + {{- /* Some values of the ConfigMap are generated, but might not be identical, from .Values */}} + {{- $_ := set $cm "istio_namespace" (include "kiali-server.istio_namespace" .) }} + {{- $_ := set $cm.auth "strategy" (include "kiali-server.auth.strategy" .) }} + {{- $_ := set $cm.auth.openshift "client_id_prefix" (include "kiali-server.fullname" .) }} + {{- $_ := set $cm.identity "cert_file" (include "kiali-server.identity.cert_file" .) }} + {{- $_ := set $cm.identity "private_key_file" (include "kiali-server.identity.private_key_file" .) }} + {{- $_ := set $cm.login_token "signing_key" (include "kiali-server.login_token.signing_key" .) }} + {{- $_ := set $cm.server "web_root" (include "kiali-server.server.web_root" .) }} + {{- toYaml $cm | nindent 4 }} +... 
diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/envoy.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/envoy.yaml new file mode 100755 index 000000000..85b402017 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/envoy.yaml @@ -0,0 +1,56 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: envoy + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + title: Envoy Metrics + discoverOn: "envoy_server_uptime" + items: + - chart: + name: "Pods uptime" + spans: 4 + metricName: "envoy_server_uptime" + dataType: "raw" + - chart: + name: "Allocated memory" + unit: "bytes" + spans: 4 + metricName: "envoy_server_memory_allocated" + dataType: "raw" + min: 0 + - chart: + name: "Heap size" + unit: "bytes" + spans: 4 + metricName: "envoy_server_memory_heap_size" + dataType: "raw" + min: 0 + - chart: + name: "Upstream active connections" + spans: 6 + metricName: "envoy_cluster_upstream_cx_active" + dataType: "raw" + - chart: + name: "Upstream total requests" + spans: 6 + metricName: "envoy_cluster_upstream_rq_total" + unit: "rps" + dataType: "rate" + - chart: + name: "Downstream active connections" + spans: 6 + metricName: "envoy_listener_downstream_cx_active" + dataType: "raw" + - chart: + name: "Downstream HTTP requests" + spans: 6 + metricName: "envoy_listener_http_downstream_rq" + unit: "rps" + dataType: "rate" +... 
+{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/go.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/go.yaml new file mode 100755 index 000000000..2d2f42a93 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/go.yaml @@ -0,0 +1,67 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: go + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + title: Go Metrics + runtime: Go + discoverOn: "go_info" + items: + - chart: + name: "CPU ratio" + spans: 6 + metricName: "process_cpu_seconds_total" + dataType: "rate" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "RSS Memory" + unit: "bytes" + spans: 6 + metricName: "process_resident_memory_bytes" + dataType: "raw" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "Goroutines" + spans: 6 + metricName: "go_goroutines" + dataType: "raw" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "Heap allocation rate" + unit: "bytes/s" + spans: 6 + metricName: "go_memstats_alloc_bytes_total" + dataType: "rate" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "GC rate" + spans: 6 + metricName: "go_gc_duration_seconds_count" + dataType: "rate" + aggregations: + - label: "pod_name" + displayName: "Pod" + - chart: + name: "Next GC" + unit: "bytes" + spans: 6 + metricName: "go_memstats_next_gc_bytes" + dataType: "raw" + aggregations: + - label: "pod_name" + displayName: "Pod" +... 
+{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/kiali.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/kiali.yaml new file mode 100755 index 000000000..b1f011b4f --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/kiali.yaml @@ -0,0 +1,44 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: kiali + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + title: Kiali Internal Metrics + items: + - chart: + name: "API processing duration" + unit: "seconds" + spans: 6 + metricName: "kiali_api_processing_duration_seconds" + dataType: "histogram" + aggregations: + - label: "route" + displayName: "Route" + - chart: + name: "Functions processing duration" + unit: "seconds" + spans: 6 + metricName: "kiali_go_function_processing_duration_seconds" + dataType: "histogram" + aggregations: + - label: "function" + displayName: "Function" + - label: "package" + displayName: "Package" + - chart: + name: "Failures" + spans: 12 + metricName: "kiali_go_function_failures_total" + dataType: "raw" + aggregations: + - label: "function" + displayName: "Function" + - label: "package" + displayName: "Package" +... +{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml new file mode 100755 index 000000000..2e1ed5cff --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/micrometer-1.0.6-jvm-pool.yaml @@ -0,0 +1,43 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) 
}} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: micrometer-1.0.6-jvm-pool + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: JVM + title: JVM Pool Metrics + discoverOn: "jvm_buffer_total_capacity_bytes" + items: + - chart: + name: "Pool buffer memory used" + unit: "bytes" + spans: 4 + metricName: "jvm_buffer_memory_used_bytes" + dataType: "raw" + aggregations: + - label: "id" + displayName: "Pool" + - chart: + name: "Pool buffer capacity" + unit: "bytes" + spans: 4 + metricName: "jvm_buffer_total_capacity_bytes" + dataType: "raw" + aggregations: + - label: "id" + displayName: "Pool" + - chart: + name: "Pool buffer count" + unit: "bytes" + spans: 4 + metricName: "jvm_buffer_count" + dataType: "raw" + aggregations: + - label: "id" + displayName: "Pool" +... +{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/micrometer-1.0.6-jvm.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/micrometer-1.0.6-jvm.yaml new file mode 100755 index 000000000..d64596882 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/micrometer-1.0.6-jvm.yaml @@ -0,0 +1,65 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: micrometer-1.0.6-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: JVM + title: JVM Metrics + discoverOn: "jvm_threads_live" + items: + - chart: + name: "Total live threads" + spans: 4 + metricName: "jvm_threads_live" + dataType: "raw" + - chart: + name: "Daemon threads" + spans: 4 + metricName: "jvm_threads_daemon" + dataType: "raw" + - chart: + name: "Loaded classes" + spans: 4 + metricName: "jvm_classes_loaded" + dataType: "raw" + + - chart: + name: "Memory used" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_used_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory commited" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_committed_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory max" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_max_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" +... +{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/micrometer-1.1-jvm.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/micrometer-1.1-jvm.yaml new file mode 100755 index 000000000..76e8d0a4a --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/micrometer-1.1-jvm.yaml @@ -0,0 +1,68 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: micrometer-1.1-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: JVM + title: JVM Metrics + discoverOn: "jvm_threads_live_threads" + items: + - chart: + name: "Memory used" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_used_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory commited" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_committed_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + - chart: + name: "Memory max" + unit: "bytes" + spans: 4 + metricName: "jvm_memory_max_bytes" + dataType: "raw" + aggregations: + - label: "area" + displayName: "Area" + - label: "id" + displayName: "Space" + + - chart: + name: "Total live threads" + spans: 4 + metricName: "jvm_threads_live_threads" + dataType: "raw" + - chart: + name: "Daemon threads" + spans: 4 + metricName: "jvm_threads_daemon_threads" + dataType: "raw" + - chart: + name: "Threads states" + spans: 4 + metricName: "jvm_threads_states_threads" + dataType: "raw" + aggregations: + - label: "state" + displayName: "State" +... +{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/microprofile-1.1.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/microprofile-1.1.yaml new file mode 100755 index 000000000..1d4951196 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/microprofile-1.1.yaml @@ -0,0 +1,59 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: microprofile-1.1 + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: MicroProfile Metrics + runtime: MicroProfile + discoverOn: "base:thread_count" + items: + - chart: + name: "Current loaded classes" + spans: 6 + metricName: "base:classloader_current_loaded_class_count" + dataType: "raw" + - chart: + name: "Unloaded classes" + spans: 6 + metricName: "base:classloader_total_unloaded_class_count" + dataType: "raw" + - chart: + name: "Thread count" + spans: 4 + metricName: "base:thread_count" + dataType: "raw" + - chart: + name: "Thread max count" + spans: 4 + metricName: "base:thread_max_count" + dataType: "raw" + - chart: + name: "Thread daemon count" + spans: 4 + metricName: "base:thread_daemon_count" + dataType: "raw" + - chart: + name: "Committed heap" + unit: "bytes" + spans: 4 + metricName: "base:memory_committed_heap_bytes" + dataType: "raw" + - chart: + name: "Max heap" + unit: "bytes" + spans: 4 + metricName: "base:memory_max_heap_bytes" + dataType: "raw" + - chart: + name: "Used heap" + unit: "bytes" + spans: 4 + metricName: "base:memory_used_heap_bytes" + dataType: "raw" +... +{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/microprofile-x.y.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/microprofile-x.y.yaml new file mode 100755 index 000000000..57ddc60ef --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/microprofile-x.y.yaml @@ -0,0 +1,38 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: microprofile-x.y + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: MicroProfile Metrics + runtime: MicroProfile + discoverOn: "base:gc_complete_scavenger_count" + items: + - chart: + name: "Young GC time" + unit: "seconds" + spans: 3 + metricName: "base:gc_young_generation_scavenger_time_seconds" + dataType: "raw" + - chart: + name: "Young GC count" + spans: 3 + metricName: "base:gc_young_generation_scavenger_count" + dataType: "raw" + - chart: + name: "Total GC time" + unit: "seconds" + spans: 3 + metricName: "base:gc_complete_scavenger_time_seconds" + dataType: "raw" + - chart: + name: "Total GC count" + spans: 3 + metricName: "base:gc_complete_scavenger_count" + dataType: "raw" +... +{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/nodejs.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/nodejs.yaml new file mode 100755 index 000000000..1ffe0aa10 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/nodejs.yaml @@ -0,0 +1,59 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: nodejs + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Node.js + title: Node.js Metrics + discoverOn: "nodejs_active_handles_total" + items: + - chart: + name: "Active handles" + spans: 4 + metricName: "nodejs_active_handles_total" + dataType: "raw" + - chart: + name: "Active requests" + spans: 4 + metricName: "nodejs_active_requests_total" + dataType: "raw" + - chart: + name: "Event loop lag" + unit: "seconds" + spans: 4 + metricName: "nodejs_eventloop_lag_seconds" + dataType: "raw" + - chart: + name: "Total heap size" + unit: "bytes" + spans: 12 + metricName: "nodejs_heap_space_size_total_bytes" + dataType: "raw" + aggregations: + - label: "space" + displayName: "Space" + - chart: + name: "Used heap size" + unit: "bytes" + spans: 6 + metricName: "nodejs_heap_space_size_used_bytes" + dataType: "raw" + aggregations: + - label: "space" + displayName: "Space" + - chart: + name: "Available heap size" + unit: "bytes" + spans: 6 + metricName: "nodejs_heap_space_size_available_bytes" + dataType: "raw" + aggregations: + - label: "space" + displayName: "Space" +... +{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/quarkus.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/quarkus.yaml new file mode 100755 index 000000000..cef5f3dce --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/quarkus.yaml @@ -0,0 +1,33 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: quarkus + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + title: Quarkus Metrics + runtime: Quarkus + items: + - chart: + name: "Thread count" + spans: 4 + metricName: "vendor:thread_count" + dataType: "raw" + - chart: + name: "Used heap" + unit: "bytes" + spans: 4 + metricName: "vendor:memory_heap_usage_bytes" + dataType: "raw" + - chart: + name: "Used non-heap" + unit: "bytes" + spans: 4 + metricName: "vendor:memory_non_heap_usage_bytes" + dataType: "raw" + - include: "microprofile-x.y" +... +{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/springboot-jvm-pool.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/springboot-jvm-pool.yaml new file mode 100755 index 000000000..42d87d890 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/springboot-jvm-pool.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: springboot-jvm-pool + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Spring Boot + title: JVM Pool Metrics + items: + - include: "micrometer-1.0.6-jvm-pool" +... +{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/springboot-jvm.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/springboot-jvm.yaml new file mode 100755 index 000000000..ced3acdd9 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/springboot-jvm.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: springboot-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Spring Boot + title: JVM Metrics + items: + - include: "micrometer-1.0.6-jvm" +... +{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/springboot-tomcat.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/springboot-tomcat.yaml new file mode 100755 index 000000000..c07016aa2 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/springboot-tomcat.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: springboot-tomcat + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Spring Boot + title: Tomcat Metrics + items: + - include: "tomcat" +... +{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/thorntail.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/thorntail.yaml new file mode 100755 index 000000000..6bd85e6f5 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/thorntail.yaml @@ -0,0 +1,22 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: thorntail + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Thorntail + title: Thorntail Metrics + discoverOn: "vendor:loaded_modules" + items: + - include: "microprofile-1.1" + - chart: + name: "Loaded modules" + spans: 6 + metricName: "vendor:loaded_modules" + dataType: "raw" +... 
+{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/tomcat.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/tomcat.yaml new file mode 100755 index 000000000..9a803342f --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/tomcat.yaml @@ -0,0 +1,67 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: tomcat + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Tomcat + title: Tomcat Metrics + discoverOn: "tomcat_sessions_created_total" + items: + - chart: + name: "Sessions created" + spans: 4 + metricName: "tomcat_sessions_created_total" + dataType: "raw" + - chart: + name: "Active sessions" + spans: 4 + metricName: "tomcat_sessions_active_current" + dataType: "raw" + - chart: + name: "Sessions rejected" + spans: 4 + metricName: "tomcat_sessions_rejected_total" + dataType: "raw" + + - chart: + name: "Bytes sent" + unit: "bitrate" + spans: 6 + metricName: "tomcat_global_sent_bytes_total" + dataType: "rate" + aggregations: + - label: "name" + displayName: "Name" + - chart: + name: "Bytes received" + unit: "bitrate" + spans: 6 + metricName: "tomcat_global_received_bytes_total" + dataType: "rate" + aggregations: + - label: "name" + displayName: "Name" + + - chart: + name: "Global errors" + spans: 6 + metricName: "tomcat_global_error_total" + dataType: "raw" + aggregations: + - label: "name" + displayName: "Name" + - chart: + name: "Servlet errors" + spans: 6 + metricName: "tomcat_servlet_error_total" + dataType: "raw" + aggregations: + - label: "name" + displayName: "Name" +... 
+{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-client.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-client.yaml new file mode 100755 index 000000000..2d591d6b0 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-client.yaml @@ -0,0 +1,60 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-client + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Client Metrics + discoverOn: "vertx_http_client_connections" + items: + - chart: + name: "Client response time" + unit: "seconds" + spans: 6 + metricName: "vertx_http_client_responseTime_seconds" + dataType: "histogram" + aggregations: + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Client request count rate" + unit: "ops" + spans: 6 + metricName: "vertx_http_client_requestCount_total" + dataType: "rate" + aggregations: + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Client active connections" + spans: 6 + metricName: "vertx_http_client_connections" + dataType: "raw" + - chart: + name: "Client active websockets" + spans: 6 + metricName: "vertx_http_client_wsConnections" + dataType: "raw" + - chart: + name: "Client bytes sent" + unit: "bytes" + spans: 6 + metricName: "vertx_http_client_bytesSent" + dataType: "histogram" + - chart: + name: "Client bytes received" + unit: "bytes" + spans: 6 + metricName: "vertx_http_client_bytesReceived" + dataType: "histogram" +... 
+{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-eventbus.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-eventbus.yaml new file mode 100755 index 000000000..65f9ee2ec --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-eventbus.yaml @@ -0,0 +1,59 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-eventbus + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Eventbus Metrics + discoverOn: "vertx_eventbus_handlers" + items: + - chart: + name: "Event bus handlers" + spans: 6 + metricName: "vertx_eventbus_handlers" + dataType: "raw" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus pending messages" + spans: 6 + metricName: "vertx_eventbus_pending" + dataType: "raw" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus processing time" + unit: "seconds" + spans: 6 + metricName: "vertx_eventbus_processingTime_seconds" + dataType: "histogram" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus bytes read" + unit: "bytes" + spans: 6 + metricName: "vertx_eventbus_bytesRead" + dataType: "histogram" + aggregations: + - label: "address" + displayName: "Eventbus address" + - chart: + name: "Event bus bytes written" + unit: "bytes" + spans: 6 + metricName: "vertx_eventbus_bytesWritten" + dataType: "histogram" + aggregations: + - label: "address" + displayName: "Eventbus address" +... 
+{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-jvm.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-jvm.yaml new file mode 100755 index 000000000..2663186f3 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-jvm.yaml @@ -0,0 +1,16 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-jvm + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + runtime: Vert.x + title: JVM Metrics + items: + - include: "micrometer-1.1-jvm" +... +{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-pool.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-pool.yaml new file mode 100755 index 000000000..f6af921b3 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-pool.yaml @@ -0,0 +1,68 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-pool + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Pools Metrics + discoverOn: "vertx_pool_ratio" + items: + - chart: + name: "Usage duration" + unit: "seconds" + spans: 6 + metricName: "vertx_pool_usage_seconds" + dataType: "histogram" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Usage ratio" + spans: 6 + metricName: "vertx_pool_ratio" + dataType: "raw" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Queue size" + spans: 6 + metricName: "vertx_pool_queue_size" + dataType: "raw" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Time in queue" + unit: "seconds" + spans: 6 + metricName: "vertx_pool_queue_delay_seconds" + dataType: "histogram" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" + - chart: + name: "Resources used" + spans: 6 + metricName: "vertx_pool_inUse" + dataType: "raw" + aggregations: + - label: "pool_name" + displayName: "Name" + - label: "pool_type" + displayName: "Type" +... +{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-server.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-server.yaml new file mode 100755 index 000000000..de6b89df9 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/dashboards/vertx-server.yaml @@ -0,0 +1,62 @@ +{{- if (include "kiali-server.isDashboardEnabled" .) }} +--- +apiVersion: "monitoring.kiali.io/v1alpha1" +kind: MonitoringDashboard +metadata: + name: vertx-server + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + runtime: Vert.x + title: Vert.x Server Metrics + discoverOn: "vertx_http_server_connections" + items: + - chart: + name: "Server response time" + unit: "seconds" + spans: 6 + metricName: "vertx_http_server_responseTime_seconds" + dataType: "histogram" + aggregations: + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Server request count rate" + unit: "ops" + spans: 6 + metricName: "vertx_http_server_requestCount_total" + dataType: "rate" + aggregations: + - label: "code" + displayName: "Error code" + - label: "path" + displayName: "Path" + - label: "method" + displayName: "Method" + - chart: + name: "Server active connections" + spans: 6 + metricName: "vertx_http_server_connections" + dataType: "raw" + - chart: + name: "Server active websockets" + spans: 6 + metricName: "vertx_http_server_wsConnections" + dataType: "raw" + - chart: + name: "Server bytes sent" + unit: "bytes" + spans: 6 + metricName: "vertx_http_server_bytesSent" + dataType: "histogram" + - chart: + name: "Server bytes received" + unit: "bytes" + spans: 6 + metricName: "vertx_http_server_bytesReceived" + dataType: "histogram" +... +{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/deployment.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/deployment.yaml new file mode 100755 index 000000000..100c57922 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/deployment.yaml @@ -0,0 +1,174 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.deployment.replicas }} + selector: + matchLabels: + {{- include "kiali-server.selectorLabels" . 
| nindent 6 }} + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + name: {{ include "kiali-server.fullname" . }} + labels: + {{- include "kiali-server.labels" . | nindent 8 }} + {{- if .Values.deployment.pod_labels }} + {{- toYaml .Values.deployment.pod_labels | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.server.metrics_enabled }} + prometheus.io/scrape: "true" + prometheus.io/port: {{ .Values.server.metrics_port | quote }} + {{- else }} + prometheus.io/scrape: "false" + prometheus.io/port: "" + {{- end }} + kiali.io/runtimes: go,kiali + {{- if .Values.deployment.pod_annotations }} + {{- toYaml .Values.deployment.pod_annotations | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ include "kiali-server.fullname" . }} + {{- if .Values.deployment.priority_class_name }} + priorityClassName: {{ .Values.deployment.priority_class_name | quote }} + {{- end }} + {{- if .Values.deployment.image_pull_secrets }} + imagePullSecrets: + {{- range .Values.deployment.image_pull_secrets }} + - name: {{ . }} + {{- end }} + {{- end }} + containers: + - image: "{{ template "system_default_registry" . }}{{ .Values.deployment.repository }}:{{ .Values.deployment.tag }}" + imagePullPolicy: {{ .Values.deployment.image_pull_policy | default "Always" }} + name: {{ include "kiali-server.fullname" . }} + command: + - "/opt/kiali/kiali" + - "-config" + - "/kiali-configuration/config.yaml" + ports: + - name: api-port + containerPort: {{ .Values.server.port | default 20001 }} + {{- if .Values.server.metrics_enabled }} + - name: http-metrics + containerPort: {{ .Values.server.metrics_port | default 9090 }} + {{- end }} + readinessProbe: + httpGet: + path: {{ include "kiali-server.server.web_root" . | trimSuffix "/" }}/healthz + port: api-port + {{- if (include "kiali-server.identity.cert_file" .) 
}} + scheme: HTTPS + {{- else }} + scheme: HTTP + {{- end }} + initialDelaySeconds: 5 + periodSeconds: 30 + livenessProbe: + httpGet: + path: {{ include "kiali-server.server.web_root" . | trimSuffix "/" }}/healthz + port: api-port + {{- if (include "kiali-server.identity.cert_file" .) }} + scheme: HTTPS + {{- else }} + scheme: HTTP + {{- end }} + initialDelaySeconds: 5 + periodSeconds: 30 + env: + - name: ACTIVE_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LOG_LEVEL + value: "{{ include "kiali-server.logLevel" . }}" + - name: LOG_FORMAT + value: "{{ .Values.deployment.logger.log_format }}" + - name: LOG_TIME_FIELD_FORMAT + value: "{{ .Values.deployment.logger.time_field_format }}" + - name: LOG_SAMPLER_RATE + value: "{{ .Values.deployment.logger.sampler_rate }}" + volumeMounts: + {{- if .Values.web_root_override }} + - name: kiali-console + subPath: env.js + mountPath: /opt/kiali/console/env.js + {{- end }} + - name: {{ include "kiali-server.fullname" . }}-configuration + mountPath: "/kiali-configuration" + - name: {{ include "kiali-server.fullname" . }}-cert + mountPath: "/kiali-cert" + - name: {{ include "kiali-server.fullname" . }}-secret + mountPath: "/kiali-secret" + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + - name: {{ include "kiali-server.fullname" . }}-cabundle + mountPath: "/kiali-cabundle" + {{- end }} + {{- if .Values.deployment.resources }} + resources: + {{- toYaml .Values.deployment.resources | nindent 10 }} + {{- end }} + volumes: + {{- if .Values.web_root_override }} + - name: kiali-console + configMap: + name: kiali-console + items: + - key: env.js + path: env.js + {{- end }} + - name: {{ include "kiali-server.fullname" . }}-configuration + configMap: + name: {{ include "kiali-server.fullname" . }} + - name: {{ include "kiali-server.fullname" . }}-cert + secret: + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + secretName: {{ include "kiali-server.fullname" . 
}}-cert-secret + {{- else }} + secretName: istio.{{ include "kiali-server.fullname" . }}-service-account + {{- end }} + {{- if not (include "kiali-server.identity.cert_file" .) }} + optional: true + {{- end }} + - name: {{ include "kiali-server.fullname" . }}-secret + secret: + secretName: {{ .Values.deployment.secret_name }} + optional: true + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + - name: {{ include "kiali-server.fullname" . }}-cabundle + configMap: + name: {{ include "kiali-server.fullname" . }}-cabundle + {{- end }} + {{- if or (.Values.deployment.affinity.node) (or (.Values.deployment.pod) (.Values.deployment.pod_anti)) }} + affinity: + {{- if .Values.deployment.affinity.node }} + nodeAffinity: + {{- toYaml .Values.deployment.affinity.node | nindent 10 }} + {{- end }} + {{- if .Values.deployment.affinity.pod }} + podAffinity: + {{- toYaml .Values.deployment.affinity.pod | nindent 10 }} + {{- end }} + {{- if .Values.deployment.affinity.pod_anti }} + podAntiAffinity: + {{- toYaml .Values.deployment.affinity.pod_anti | nindent 10 }} + {{- end }} + {{- end }} + {{- if .Values.deployment.tolerations }} + tolerations: + {{- toYaml .Values.deployment.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.deployment.node_selector }} + nodeSelector: + {{- toYaml .Values.deployment.node_selector | nindent 8 }} + {{- end }} +... diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/hpa.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/hpa.yaml new file mode 100755 index 000000000..934c4c1e9 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/hpa.yaml @@ -0,0 +1,17 @@ +{{- if .Values.deployment.hpa.spec }} +--- +apiVersion: {{ .Values.deployment.hpa.api_version }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "kiali-server.fullname" . }} + {{- toYaml .Values.deployment.hpa.spec | nindent 2 }} +... +{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/ingress.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/ingress.yaml new file mode 100755 index 000000000..e4c98db1b --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/ingress.yaml @@ -0,0 +1,40 @@ +{{- if not (.Capabilities.APIVersions.Has "route.openshift.io/v1") }} +{{- if .Values.deployment.ingress_enabled }} +--- +apiVersion: networking.k8s.io/v1beta1 +kind: Ingress +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + annotations: + {{- if hasKey .Values.deployment.override_ingress_yaml.metadata "annotations" }} + {{- toYaml .Values.deployment.override_ingress_yaml.metadata.annotations | nindent 4 }} + {{- else }} + # For ingress-nginx versions older than 0.20.0 use secure-backends. + # (see: https://github.com/kubernetes/ingress-nginx/issues/3416#issuecomment-438247948) + # For ingress-nginx versions 0.20.0 and later use backend-protocol. + {{- if (include "kiali-server.identity.cert_file" .) }} + nginx.ingress.kubernetes.io/secure-backends: "true" + nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" + {{- else }} + nginx.ingress.kubernetes.io/secure-backends: "false" + nginx.ingress.kubernetes.io/backend-protocol: "HTTP" + {{- end }} + {{- end }} +spec: + {{- if hasKey .Values.deployment.override_ingress_yaml "spec" }} + {{- toYaml .Values.deployment.override_ingress_yaml.spec | nindent 2 }} + {{- else }} + rules: + - http: + paths: + - path: {{ include "kiali-server.server.web_root" . }} + backend: + serviceName: {{ include "kiali-server.fullname" . 
}} + servicePort: {{ .Values.server.port }} + {{- end }} +... +{{- end }} +{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/oauth.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/oauth.yaml new file mode 100755 index 000000000..a178bb85e --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/oauth.yaml @@ -0,0 +1,17 @@ +{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} +{{- if .Values.kiali_route_url }} +--- +apiVersion: oauth.openshift.io/v1 +kind: OAuthClient +metadata: + name: {{ include "kiali-server.fullname" . }}-{{ .Release.Namespace }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +redirectURIs: +- {{ .Values.kiali_route_url }} +grantMethod: auto +allowAnyScope: true +... +{{- end }} +{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/psp.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/psp.yaml new file mode 100755 index 000000000..f891892cc --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/psp.yaml @@ -0,0 +1,67 @@ +{{- if .Values.global.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "kiali-server.fullname" . }}-psp + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "kiali-server.fullname" . }}-psp +subjects: + - kind: ServiceAccount + name: kiali +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "kiali-server.fullname" . }}-psp + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: + - policy + resourceNames: + - {{ include "kiali-server.fullname" . 
}}-psp + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "kiali-server.fullname" . }}-psp + namespace: {{ .Release.Namespace }} +spec: + allowPrivilegeEscalation: false + forbiddenSysctls: + - '*' + fsGroup: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + requiredDropCapabilities: + - ALL + runAsUser: + rule: MustRunAsNonRoot + runAsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + seLinux: + rule: RunAsAny + supplementalGroups: + ranges: + - max: 65535 + min: 1 + rule: MustRunAs + volumes: + - configMap + - emptyDir + - projected + - secret + - downwardAPI + - persistentVolumeClaim +{{- end }} \ No newline at end of file diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/role-controlplane.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/role-controlplane.yaml new file mode 100755 index 000000000..a22c76756 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/role-controlplane.yaml @@ -0,0 +1,15 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "kiali-server.fullname" . }}-controlplane + namespace: {{ include "kiali-server.istio_namespace" . }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +rules: +- apiGroups: [""] + resources: + - secrets + verbs: + - list +... diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/role-viewer.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/role-viewer.yaml new file mode 100755 index 000000000..9fdd9fd1d --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/role-viewer.yaml @@ -0,0 +1,97 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "kiali-server.fullname" . }}-viewer + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +rules: +- apiGroups: [""] + resources: + - configmaps + - endpoints + - namespaces + - nodes + - pods + - pods/log + - pods/proxy + - replicationcontrollers + - services + verbs: + - get + - list + - watch +- apiGroups: [""] + resources: + - pods/portforward + verbs: + - create + - post +- apiGroups: ["extensions", "apps"] + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - watch +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - watch +- apiGroups: ["batch"] + resources: + - cronjobs + - jobs + verbs: + - get + - list + - watch +- apiGroups: + - networking.istio.io + - security.istio.io + resources: ["*"] + verbs: + - get + - list + - watch +- apiGroups: ["apps.openshift.io"] + resources: + - deploymentconfigs + verbs: + - get + - list + - watch +- apiGroups: ["project.openshift.io"] + resources: + - projects + verbs: + - get +- apiGroups: ["route.openshift.io"] + resources: + - routes + verbs: + - get +- apiGroups: ["monitoring.kiali.io"] + resources: + - monitoringdashboards + verbs: + - get + - list +- apiGroups: ["iter8.tools"] + resources: + - experiments + verbs: + - get + - list +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: + - create +... diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/role.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/role.yaml new file mode 100755 index 000000000..8444bc753 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/role.yaml @@ -0,0 +1,108 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "kiali-server.fullname" . }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +rules: +- apiGroups: [""] + resources: + - configmaps + - endpoints + - namespaces + - nodes + - pods + - pods/log + - pods/proxy + - replicationcontrollers + - services + verbs: + - get + - list + - patch + - watch +- apiGroups: [""] + resources: + - pods/portforward + verbs: + - create + - post +- apiGroups: ["extensions", "apps"] + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - get + - list + - patch + - watch +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: + - get + - list + - watch +- apiGroups: ["batch"] + resources: + - cronjobs + - jobs + verbs: + - get + - list + - patch + - watch +- apiGroups: + - networking.istio.io + - security.istio.io + resources: ["*"] + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: ["apps.openshift.io"] + resources: + - deploymentconfigs + verbs: + - get + - list + - patch + - watch +- apiGroups: ["project.openshift.io"] + resources: + - projects + verbs: + - get +- apiGroups: ["route.openshift.io"] + resources: + - routes + verbs: + - get +- apiGroups: ["monitoring.kiali.io"] + resources: + - monitoringdashboards + verbs: + - get + - list +- apiGroups: ["iter8.tools"] + resources: + - experiments + verbs: + - create + - delete + - get + - list + - patch + - watch +- apiGroups: ["authentication.k8s.io"] + resources: + - tokenreviews + verbs: + - create +... diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/rolebinding-controlplane.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/rolebinding-controlplane.yaml new file mode 100755 index 000000000..5a0015836 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/rolebinding-controlplane.yaml @@ -0,0 +1,17 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "kiali-server.fullname" . 
}}-controlplane + namespace: {{ include "kiali-server.istio_namespace" . }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "kiali-server.fullname" . }}-controlplane +subjects: +- kind: ServiceAccount + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} +... diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/rolebinding.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/rolebinding.yaml new file mode 100755 index 000000000..1eaabd65f --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/rolebinding.yaml @@ -0,0 +1,20 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "kiali-server.fullname" . }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + {{- if .Values.deployment.view_only_mode }} + name: {{ include "kiali-server.fullname" . }}-viewer + {{- else }} + name: {{ include "kiali-server.fullname" . }} + {{- end }} +subjects: +- kind: ServiceAccount + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} +... diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/route.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/route.yaml new file mode 100755 index 000000000..27940dc96 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/route.yaml @@ -0,0 +1,30 @@ +{{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} +{{- if .Values.deployment.ingress_enabled }} +# As of OpenShift 4.5, need to use --disable-openapi-validation when installing via Helm +--- +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: {{ include "kiali-server.fullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + {{- if hasKey .Values.deployment.override_ingress_yaml.metadata "annotations" }} + annotations: + {{- toYaml .Values.deployment.override_ingress_yaml.metadata.annotations | nindent 4 }} + {{- end }} +spec: + {{- if hasKey .Values.deployment.override_ingress_yaml "spec" }} + {{- toYaml .Values.deployment.override_ingress_yaml.spec | nindent 2 }} + {{- else }} + tls: + termination: reencrypt + insecureEdgeTerminationPolicy: Redirect + to: + kind: Service + targetPort: {{ .Values.server.port }} + name: {{ include "kiali-server.fullname" . }} + {{- end }} +... +{{- end }} +{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/service.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/service.yaml new file mode 100755 index 000000000..9ccf4f388 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/service.yaml @@ -0,0 +1,47 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} + annotations: + {{- if .Capabilities.APIVersions.Has "route.openshift.io/v1" }} + service.beta.openshift.io/serving-cert-secret-name: {{ include "kiali-server.fullname" . 
}}-cert-secret + {{- end }} + kiali.io/api-spec: https://kiali.io/api + kiali.io/api-type: rest + {{- if and (not (empty .Values.server.web_fqdn)) (not (empty .Values.server.web_schema)) }} + {{- if empty .Values.server.web_port }} + kiali.io/external-url: {{ .Values.server.web_schema }}://{{ .Values.server.web_fqdn }}{{ default "" .Values.server.web_root }} + {{- else }} + kiali.io/external-url: {{ .Values.server.web_schema }}://{{ .Values.server.web_fqdn }}:{{ .Values.server.web_port }}{{(default "" .Values.server.web_root) }} + {{- end }} + {{- end }} + {{- if .Values.deployment.service_annotations }} + {{- toYaml .Values.deployment.service_annotations | nindent 4 }} + {{- end }} +spec: + {{- if .Values.deployment.service_type }} + type: {{ .Values.deployment.service_type }} + {{- end }} + ports: + {{- if (include "kiali-server.identity.cert_file" .) }} + - name: tcp + {{- else }} + - name: http + {{- end }} + protocol: TCP + port: {{ .Values.server.port }} + {{- if .Values.server.metrics_enabled }} + - name: http-metrics + protocol: TCP + port: {{ .Values.server.metrics_port }} + {{- end }} + selector: + {{- include "kiali-server.selectorLabels" . | nindent 4 }} + {{- if .Values.deployment.additional_service_yaml }} + {{- toYaml .Values.deployment.additional_service_yaml | nindent 2 }} + {{- end }} +... diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/serviceaccount.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/serviceaccount.yaml new file mode 100755 index 000000000..9151b6f6a --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/serviceaccount.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "kiali-server.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . | nindent 4 }} +... 
diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/validate-install-crd.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/validate-install-crd.yaml new file mode 100755 index 000000000..b42eeb266 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/validate-install-crd.yaml @@ -0,0 +1,14 @@ +#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}} +# {{- $found := dict -}} +# {{- set $found "monitoring.kiali.io/v1alpha1/MonitoringDashboard" false -}} +# {{- range .Capabilities.APIVersions -}} +# {{- if hasKey $found (toString .) -}} +# {{- set $found (toString .) true -}} +# {{- end -}} +# {{- end -}} +# {{- range $_, $exists := $found -}} +# {{- if (eq $exists false) -}} +# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}} +# {{- end -}} +# {{- end -}} +#{{- end -}} \ No newline at end of file diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/web-root-configmap.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/web-root-configmap.yaml new file mode 100755 index 000000000..970d4e4f5 --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/templates/web-root-configmap.yaml @@ -0,0 +1,12 @@ +{{- if .Values.web_root_override }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: kiali-console + namespace: {{ .Release.Namespace }} + labels: + {{- include "kiali-server.labels" . 
| nindent 4 }} +data: + env.js: | + window.WEB_ROOT='/k8s/clusters/{{ .Values.global.cattle.clusterId }}/api/v1/namespaces/{{ .Release.Namespace }}/services/http:kiali:20001/proxy/kiali'; +{{- end }} diff --git a/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/values.yaml b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/values.yaml new file mode 100755 index 000000000..aada4e09a --- /dev/null +++ b/charts/rancher-kiali-server/rancher-kiali-server/1.32.100/values.yaml @@ -0,0 +1,93 @@ +nameOverride: "kiali" +fullnameOverride: "kiali" + +# This is required for "openshift" auth strategy. +# You have to know ahead of time what your Route URL will be because +# right now the helm chart can't figure this out at runtime (it would +# need to wait for the Kiali Route to be deployed and for OpenShift +# to start it up). If someone knows how to update this helm chart to +# do this, a PR would be welcome. +kiali_route_url: "" + +# rancher specific override that allows proxy access to kiali url +web_root_override: true + +# +# Settings that mimic the Kiali CR which are placed in the ConfigMap. +# Note that only those values used by the Helm Chart will be here. +# + +istio_namespace: "" # default is where Kiali is installed + +auth: + openid: {} + openshift: {} + strategy: "" + +deployment: + # This only limits what Kiali will attempt to see, but Kiali Service Account has permissions to see everything. 
+ # For more control over what the Kiali Service Account can see, use the Kiali Operator + accessible_namespaces: + - "**" + additional_service_yaml: {} + affinity: + node: {} + pod: {} + pod_anti: {} + custom_dashboards: + excludes: [''] + includes: ['*'] + hpa: + api_version: "autoscaling/v2beta2" + spec: {} + repository: rancher/mirrored-kiali-kiali + image_pull_policy: "Always" + image_pull_secrets: [] + tag: v1.32.0 + ingress_enabled: true + logger: + log_format: "text" + log_level: "info" + time_field_format: "2006-01-02T15:04:05Z07:00" + sampler_rate: "1" + node_selector: {} + override_ingress_yaml: + metadata: {} + pod_annotations: {} + pod_labels: {} + priority_class_name: "" + replicas: 1 + resources: {} + secret_name: "kiali" + service_annotations: {} + service_type: "" + tolerations: [] + version_label: v1.32.0 + view_only_mode: false + +external_services: + custom_dashboards: + enabled: true + +identity: {} + #cert_file: + #private_key_file: + +login_token: + signing_key: "" + +server: + port: 20001 + metrics_enabled: true + metrics_port: 9090 + web_root: "" + +# Common settings used among istio subcharts. +global: + # Specify rancher clusterId of external tracing config + # https://github.com/istio/istio.io/issues/4146#issuecomment-493543032 + cattle: + systemDefaultRegistry: "" + clusterId: + rbac: + pspEnabled: false diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/.helmignore b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/.helmignore new file mode 100755 index 000000000..f0c131944 --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/Chart.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/Chart.yaml new file mode 100755 index 000000000..aab9c879e --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/Chart.yaml @@ -0,0 +1,24 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-kube-state-metrics +apiVersion: v1 +appVersion: 1.9.8 +description: Install kube-state-metrics to generate and expose cluster-level metrics +home: https://github.com/kubernetes/kube-state-metrics/ +keywords: +- metric +- monitoring +- prometheus +- kubernetes +maintainers: +- email: tariq.ibrahim@mulesoft.com + name: tariq1890 +- email: manuel@rueg.eu + name: mrueg +name: rancher-kube-state-metrics +sources: +- https://github.com/kubernetes/kube-state-metrics/ +version: 2.13.101 diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/LICENSE b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/LICENSE new file mode 100755 index 000000000..393b7a33b --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright The Helm Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/OWNERS b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/OWNERS new file mode 100755 index 000000000..206b4fee7 --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/OWNERS @@ -0,0 +1,6 @@ +approvers: +- tariq1890 +- mrueg +reviewers: +- tariq1890 +- mrueg diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/README.md b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/README.md new file mode 100755 index 000000000..e93a3d252 --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/README.md @@ -0,0 +1,66 @@ +# kube-state-metrics Helm Chart + +Installs the [kube-state-metrics agent](https://github.com/kubernetes/kube-state-metrics). + +## Get Repo Info + +```console +helm repo add kube-state-metrics https://kubernetes.github.io/kube-state-metrics +helm repo update +``` + +_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Install Chart + +```console +# Helm 3 +$ helm install [RELEASE_NAME] kube-state-metrics/kube-state-metrics [flags] + +# Helm 2 +$ helm install --name [RELEASE_NAME] kube-state-metrics/kube-state-metrics [flags] +``` + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +# Helm 3 +$ helm uninstall [RELEASE_NAME] + +# Helm 2 +# helm delete --purge [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. 
+ +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +# Helm 3 or 2 +$ helm upgrade [RELEASE_NAME] kube-state-metrics/kube-state-metrics [flags] +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### From stable/kube-state-metrics + +You can upgrade in-place: + +1. [get repo info](#get-repo-info) +1. [upgrade](#upgrading-chart) your existing release name using the new chart repo + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments: + +```console +helm show values kube-state-metrics/kube-state-metrics +``` + +You may also `helm show values` on this chart's [dependencies](#dependencies) for additional options. diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/NOTES.txt b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/NOTES.txt new file mode 100755 index 000000000..5a646e0cc --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/NOTES.txt @@ -0,0 +1,10 @@ +kube-state-metrics is a simple service that listens to the Kubernetes API server and generates metrics about the state of the objects. +The exposed metrics can be found here: +https://github.com/kubernetes/kube-state-metrics/blob/master/docs/README.md#exposed-metrics + +The metrics are exported on the HTTP endpoint /metrics on the listening port. +In your case, {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-state-metrics.namespace" . }}.svc.cluster.local:{{ .Values.service.port }}/metrics + +They are served either as plaintext or protobuf depending on the Accept header. 
+They are designed to be consumed either by Prometheus itself or by a scraper that is compatible with scraping a Prometheus client endpoint. + diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/_helpers.tpl b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/_helpers.tpl new file mode 100755 index 000000000..4f76b188b --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/_helpers.tpl @@ -0,0 +1,76 @@ +# Rancher +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "kube-state-metrics.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "kube-state-metrics.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "kube-state-metrics.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "kube-state-metrics.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "kube-state-metrics.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/clusterrolebinding.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/clusterrolebinding.yaml new file mode 100755 index 000000000..af158c512 --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/clusterrolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.rbac.create .Values.rbac.useClusterRole -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: {{ template "kube-state-metrics.fullname" . 
}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- if .Values.rbac.useExistingRole }} + name: {{ .Values.rbac.useExistingRole }} +{{- else }} + name: {{ template "kube-state-metrics.fullname" . }} +{{- end }} +subjects: +- kind: ServiceAccount + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end -}} diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/deployment.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/deployment.yaml new file mode 100755 index 000000000..4ab55291b --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/deployment.yaml @@ -0,0 +1,217 @@ +apiVersion: apps/v1 +{{- if .Values.autosharding.enabled }} +kind: StatefulSet +{{- else }} +kind: Deployment +{{- end }} +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" + app.kubernetes.io/version: "{{ .Chart.AppVersion }}" +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + replicas: {{ .Values.replicas }} +{{- if .Values.autosharding.enabled }} + serviceName: {{ template "kube-state-metrics.fullname" . }} + volumeClaimTemplates: [] +{{- end }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . 
}} + app.kubernetes.io/instance: "{{ .Release.Name }}" +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 8 }} +{{- end }} +{{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} + spec: + hostNetwork: {{ .Values.hostNetwork }} + serviceAccountName: {{ template "kube-state-metrics.serviceAccountName" . }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + containers: + - name: {{ .Chart.Name }} +{{- if .Values.autosharding.enabled }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace +{{- end }} + args: +{{ if .Values.extraArgs }} + {{- range .Values.extraArgs }} + - {{ . 
}} + {{- end }} +{{ end }} +{{ if .Values.collectors.certificatesigningrequests }} + - --collectors=certificatesigningrequests +{{ end }} +{{ if .Values.collectors.configmaps }} + - --collectors=configmaps +{{ end }} +{{ if .Values.collectors.cronjobs }} + - --collectors=cronjobs +{{ end }} +{{ if .Values.collectors.daemonsets }} + - --collectors=daemonsets +{{ end }} +{{ if .Values.collectors.deployments }} + - --collectors=deployments +{{ end }} +{{ if .Values.collectors.endpoints }} + - --collectors=endpoints +{{ end }} +{{ if .Values.collectors.horizontalpodautoscalers }} + - --collectors=horizontalpodautoscalers +{{ end }} +{{ if .Values.collectors.ingresses }} + - --collectors=ingresses +{{ end }} +{{ if .Values.collectors.jobs }} + - --collectors=jobs +{{ end }} +{{ if .Values.collectors.limitranges }} + - --collectors=limitranges +{{ end }} +{{ if .Values.collectors.mutatingwebhookconfigurations }} + - --collectors=mutatingwebhookconfigurations +{{ end }} +{{ if .Values.collectors.namespaces }} + - --collectors=namespaces +{{ end }} +{{ if .Values.collectors.networkpolicies }} + - --collectors=networkpolicies +{{ end }} +{{ if .Values.collectors.nodes }} + - --collectors=nodes +{{ end }} +{{ if .Values.collectors.persistentvolumeclaims }} + - --collectors=persistentvolumeclaims +{{ end }} +{{ if .Values.collectors.persistentvolumes }} + - --collectors=persistentvolumes +{{ end }} +{{ if .Values.collectors.poddisruptionbudgets }} + - --collectors=poddisruptionbudgets +{{ end }} +{{ if .Values.collectors.pods }} + - --collectors=pods +{{ end }} +{{ if .Values.collectors.replicasets }} + - --collectors=replicasets +{{ end }} +{{ if .Values.collectors.replicationcontrollers }} + - --collectors=replicationcontrollers +{{ end }} +{{ if .Values.collectors.resourcequotas }} + - --collectors=resourcequotas +{{ end }} +{{ if .Values.collectors.secrets }} + - --collectors=secrets +{{ end }} +{{ if .Values.collectors.services }} + - --collectors=services +{{ end }} +{{ 
if .Values.collectors.statefulsets }} + - --collectors=statefulsets +{{ end }} +{{ if .Values.collectors.storageclasses }} + - --collectors=storageclasses +{{ end }} +{{ if .Values.collectors.validatingwebhookconfigurations }} + - --collectors=validatingwebhookconfigurations +{{ end }} +{{ if .Values.collectors.verticalpodautoscalers }} + - --collectors=verticalpodautoscalers +{{ end }} +{{ if .Values.collectors.volumeattachments }} + - --collectors=volumeattachments +{{ end }} +{{ if .Values.namespace }} + - --namespace={{ .Values.namespace | join "," }} +{{ end }} +{{ if .Values.autosharding.enabled }} + - --pod=$(POD_NAME) + - --pod-namespace=$(POD_NAMESPACE) +{{ end }} +{{ if .Values.kubeconfig.enabled }} + - --kubeconfig=/opt/k8s/.kube/config +{{ end }} +{{ if .Values.selfMonitor.telemetryHost }} + - --telemetry-host={{ .Values.selfMonitor.telemetryHost }} +{{ end }} + - --telemetry-port=8081 +{{- if .Values.kubeconfig.enabled }} + volumeMounts: + - name: kubeconfig + mountPath: /opt/k8s/.kube/ + readOnly: true +{{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}" + ports: + - containerPort: 8080 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: / + port: 8080 + initialDelaySeconds: 5 + timeoutSeconds: 5 +{{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 10 }} +{{- end }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} +{{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} +{{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.nodeSelector }} +{{ toYaml .Values.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} +{{- if .Values.tolerations }} +{{ toYaml .Values.tolerations | indent 8 }} +{{- end }} +{{- if .Values.kubeconfig.enabled}} + volumes: + - name: kubeconfig + secret: + secretName: {{ template "kube-state-metrics.fullname" . }}-kubeconfig +{{- end }} diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/kubeconfig-secret.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/kubeconfig-secret.yaml new file mode 100755 index 000000000..a7800d7ad --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/kubeconfig-secret.yaml @@ -0,0 +1,15 @@ +{{- if .Values.kubeconfig.enabled -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "kube-state-metrics.fullname" . }}-kubeconfig + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" +type: Opaque +data: + config: '{{ .Values.kubeconfig.secret }}' +{{- end -}} diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/pdb.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/pdb.yaml new file mode 100755 index 000000000..d3ef8104e --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/pdb.yaml @@ -0,0 +1,20 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . 
}} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end -}} diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/podsecuritypolicy.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/podsecuritypolicy.yaml new file mode 100755 index 000000000..e822ba0e7 --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/podsecuritypolicy.yaml @@ -0,0 +1,42 @@ +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Values.podSecurityPolicy.annotations }} + annotations: +{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + volumes: + - 'secret' +{{- if .Values.podSecurityPolicy.additionalVolumes }} +{{ toYaml .Values.podSecurityPolicy.additionalVolumes | indent 4 }} +{{- end }} + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/psp-clusterrole.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/psp-clusterrole.yaml new file mode 100755 index 000000000..217abc950 --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/psp-clusterrole.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.podSecurityPolicy.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: psp-{{ template "kube-state-metrics.fullname" . }} +rules: +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} +- apiGroups: ['policy'] +{{- else }} +- apiGroups: ['extensions'] +{{- end }} + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "kube-state-metrics.fullname" . }} +{{- end }} diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/psp-clusterrolebinding.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/psp-clusterrolebinding.yaml new file mode 100755 index 000000000..feb97f228 --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/psp-clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.podSecurityPolicy.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . 
}} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: psp-{{ template "kube-state-metrics.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp-{{ template "kube-state-metrics.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end }} diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/role.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/role.yaml new file mode 100755 index 000000000..6259d2f61 --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/role.yaml @@ -0,0 +1,192 @@ +{{- if and (eq $.Values.rbac.create true) (not .Values.rbac.useExistingRole) -}} +{{- if eq .Values.rbac.useClusterRole false }} +{{- range (split "," $.Values.namespace) }} +{{- end }} +{{- end -}} +--- +apiVersion: rbac.authorization.k8s.io/v1 +{{- if eq .Values.rbac.useClusterRole false }} +kind: Role +{{- else }} +kind: ClusterRole +{{- end }} +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" $ }} + helm.sh/chart: {{ $.Chart.Name }}-{{ $.Chart.Version }} + app.kubernetes.io/managed-by: {{ $.Release.Service }} + app.kubernetes.io/instance: {{ $.Release.Name }} + name: {{ template "kube-state-metrics.fullname" $ }} +{{- if eq .Values.rbac.useClusterRole false }} + namespace: {{ . 
}} +{{- end }} +rules: +{{ if $.Values.collectors.certificatesigningrequests }} +- apiGroups: ["certificates.k8s.io"] + resources: + - certificatesigningrequests + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.configmaps }} +- apiGroups: [""] + resources: + - configmaps + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.cronjobs }} +- apiGroups: ["batch"] + resources: + - cronjobs + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.daemonsets }} +- apiGroups: ["extensions", "apps"] + resources: + - daemonsets + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.deployments }} +- apiGroups: ["extensions", "apps"] + resources: + - deployments + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.endpoints }} +- apiGroups: [""] + resources: + - endpoints + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.horizontalpodautoscalers }} +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.ingresses }} +- apiGroups: ["extensions", "networking.k8s.io"] + resources: + - ingresses + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.jobs }} +- apiGroups: ["batch"] + resources: + - jobs + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.limitranges }} +- apiGroups: [""] + resources: + - limitranges + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.mutatingwebhookconfigurations }} +- apiGroups: ["admissionregistration.k8s.io"] + resources: + - mutatingwebhookconfigurations + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.namespaces }} +- apiGroups: [""] + resources: + - namespaces + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.networkpolicies }} +- apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.nodes }} +- apiGroups: [""] + resources: + - nodes + 
verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.persistentvolumeclaims }} +- apiGroups: [""] + resources: + - persistentvolumeclaims + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.persistentvolumes }} +- apiGroups: [""] + resources: + - persistentvolumes + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.poddisruptionbudgets }} +- apiGroups: ["policy"] + resources: + - poddisruptionbudgets + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.pods }} +- apiGroups: [""] + resources: + - pods + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.replicasets }} +- apiGroups: ["extensions", "apps"] + resources: + - replicasets + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.replicationcontrollers }} +- apiGroups: [""] + resources: + - replicationcontrollers + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.resourcequotas }} +- apiGroups: [""] + resources: + - resourcequotas + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.secrets }} +- apiGroups: [""] + resources: + - secrets + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.services }} +- apiGroups: [""] + resources: + - services + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.statefulsets }} +- apiGroups: ["apps"] + resources: + - statefulsets + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.storageclasses }} +- apiGroups: ["storage.k8s.io"] + resources: + - storageclasses + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.validatingwebhookconfigurations }} +- apiGroups: ["admissionregistration.k8s.io"] + resources: + - validatingwebhookconfigurations + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.volumeattachments }} +- apiGroups: ["storage.k8s.io"] + resources: + - volumeattachments + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.verticalpodautoscalers }} +- apiGroups: ["autoscaling.k8s.io"] 
+ resources: + - verticalpodautoscalers + verbs: ["list", "watch"] +{{ end -}} +{{- end -}} diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/rolebinding.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/rolebinding.yaml new file mode 100755 index 000000000..732174a33 --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/rolebinding.yaml @@ -0,0 +1,27 @@ +{{- if and (eq .Values.rbac.create true) (eq .Values.rbac.useClusterRole false) -}} +{{- range (split "," $.Values.namespace) }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" $ }} + helm.sh/chart: {{ $.Chart.Name }}-{{ $.Chart.Version }} + app.kubernetes.io/managed-by: {{ $.Release.Service }} + app.kubernetes.io/instance: {{ $.Release.Name }} + name: {{ template "kube-state-metrics.fullname" $ }} + namespace: {{ . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role +{{- if (not $.Values.rbac.useExistingRole) }} + name: {{ template "kube-state-metrics.fullname" $ }} +{{- else }} + name: {{ $.Values.rbac.useExistingRole }} +{{- end }} +subjects: +- kind: ServiceAccount + name: {{ template "kube-state-metrics.fullname" $ }} + namespace: {{ template "kube-state-metrics.namespace" $ }} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/service.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/service.yaml new file mode 100755 index 000000000..4f8e4a497 --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/service.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . 
}} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} + annotations: + {{- if .Values.prometheusScrape }} + prometheus.io/scrape: '{{ .Values.prometheusScrape }}' + {{- end }} + {{- if .Values.service.annotations }} + {{- toYaml .Values.service.annotations | nindent 4 }} + {{- end }} +spec: + type: "{{ .Values.service.type }}" + ports: + - name: "http" + protocol: TCP + port: {{ .Values.service.port }} + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + targetPort: 8080 + {{ if .Values.selfMonitor.enabled }} + - name: "metrics" + protocol: TCP + port: {{ .Values.selfMonitor.telemetryPort | default 8081 }} + targetPort: 8081 + {{ end }} +{{- if .Values.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.service.loadBalancerIP }}" +{{- end }} + selector: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/serviceaccount.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/serviceaccount.yaml new file mode 100755 index 000000000..2e8a1ee38 --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/serviceaccount.yaml @@ -0,0 +1,18 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: {{ template "kube-state-metrics.fullname" . 
}} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- if .Values.serviceAccount.annotations }} + annotations: +{{ toYaml .Values.serviceAccount.annotations | indent 4 }} +{{- end }} +imagePullSecrets: +{{ toYaml .Values.serviceAccount.imagePullSecrets | indent 2 }} +{{- end -}} diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/servicemonitor.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/servicemonitor.yaml new file mode 100755 index 000000000..7d1cd7aa1 --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/servicemonitor.yaml @@ -0,0 +1,34 @@ +{{- if .Values.prometheus.monitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" + {{- if .Values.prometheus.monitor.additionalLabels }} +{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }} + {{- end }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + endpoints: + - port: http + {{- if .Values.prometheus.monitor.honorLabels }} + honorLabels: true + {{- end }} + {{ if .Values.selfMonitor.enabled }} + - port: metrics + {{- if .Values.prometheus.monitor.honorLabels }} + honorLabels: true + {{- end }} + {{ end }} +{{- end }} diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/stsdiscovery-role.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/stsdiscovery-role.yaml new file mode 100755 index 000000000..9770b0498 --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/stsdiscovery-role.yaml @@ -0,0 +1,29 @@ +{{- if and .Values.autosharding.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get +- apiGroups: + - apps + resourceNames: + - {{ template "kube-state-metrics.fullname" . 
}} + resources: + - statefulsets + verbs: + - get + - list + - watch +{{- end }} diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/stsdiscovery-rolebinding.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/stsdiscovery-rolebinding.yaml new file mode 100755 index 000000000..6a2e5bfe7 --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/templates/stsdiscovery-rolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.autosharding.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end }} diff --git a/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/values.yaml b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/values.yaml new file mode 100755 index 000000000..f64645690 --- /dev/null +++ b/charts/rancher-kube-state-metrics/rancher-kube-state-metrics/2.13.101/values.yaml @@ -0,0 +1,184 @@ +global: + cattle: + systemDefaultRegistry: "" + +# Default values for kube-state-metrics. 
+prometheusScrape: true +image: + repository: rancher/mirrored-kube-state-metrics-kube-state-metrics + tag: v1.9.8 + pullPolicy: IfNotPresent + +imagePullSecrets: [] +# - name: "image-pull-secret" + +# If set to true, this will deploy kube-state-metrics as a StatefulSet and the data +# will be automatically sharded across <.Values.replicas> pods using the built-in +# autodiscovery feature: https://github.com/kubernetes/kube-state-metrics#automated-sharding +# This is an experimental feature and there are no stability guarantees. +autosharding: + enabled: false + +replicas: 1 + +# List of additional cli arguments to configure kube-state-metrics +# for example: --enable-gzip-encoding, --log-file, etc. +# all the possible args can be found here: https://github.com/kubernetes/kube-state-metrics/blob/master/docs/cli-arguments.md +extraArgs: [] + +service: + port: 8080 + # Default to clusterIP for backward compatibility + type: ClusterIP + nodePort: 0 + loadBalancerIP: "" + annotations: {} + +customLabels: {} + +hostNetwork: false + +rbac: + # If true, create & use RBAC resources + create: true + + # Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to it, rolename set here. + # useExistingRole: your-existing-role + + # If set to false - Run without Cluteradmin privs needed - ONLY works if namespace is also set (if useExistingRole is set this name is used as ClusterRole or Role to bind to) + useClusterRole: true + +serviceAccount: + # Specifies whether a ServiceAccount should be created, require rbac true + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + # Reference to one or more secrets to be used when pulling images + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + imagePullSecrets: [] + # ServiceAccount annotations. 
+ # Use case: AWS EKS IAM roles for service accounts + # ref: https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html + annotations: {} + +prometheus: + monitor: + enabled: false + additionalLabels: {} + namespace: "" + honorLabels: false + +## Specify if a Pod Security Policy for kube-state-metrics must be created +## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + enabled: false + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + additionalVolumes: [] + +securityContext: + enabled: true + runAsNonRoot: true + runAsGroup: 65534 + runAsUser: 65534 + fsGroup: 65534 + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} + +## Affinity settings for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +affinity: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +# Annotations to be added to the pod +podAnnotations: {} + +## Assign a PriorityClassName to pods if set +# priorityClassName: "" + +# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} + +# Available collectors for kube-state-metrics. By default all available +# collectors are enabled. 
+collectors: + certificatesigningrequests: true + configmaps: true + cronjobs: true + daemonsets: true + deployments: true + endpoints: true + horizontalpodautoscalers: true + ingresses: true + jobs: true + limitranges: true + mutatingwebhookconfigurations: true + namespaces: true + networkpolicies: true + nodes: true + persistentvolumeclaims: true + persistentvolumes: true + poddisruptionbudgets: true + pods: true + replicasets: true + replicationcontrollers: true + resourcequotas: true + secrets: true + services: true + statefulsets: true + storageclasses: true + validatingwebhookconfigurations: true + verticalpodautoscalers: false + volumeattachments: true + +# Enabling kubeconfig will pass the --kubeconfig argument to the container +kubeconfig: + enabled: false + # base64 encoded kube-config file + secret: + +# Namespace to be enabled for collecting resources. By default all namespaces are collected. +# namespace: "" + +## Override the deployment namespace +## +namespaceOverride: "" + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 64Mi + # requests: + # cpu: 10m + # memory: 32Mi + +## Provide a k8s version to define apiGroups for podSecurityPolicy Cluster Role. 
+## For example: kubeTargetVersionOverride: 1.14.9 +## +kubeTargetVersionOverride: "" + +# Enable self metrics configuration for service and Service Monitor +# Default values for telemetry configuration can be overriden +selfMonitor: + enabled: false + # telemetryHost: 0.0.0.0 + # telemetryPort: 8081 diff --git a/charts/rancher-logging/rancher-logging-crd/3.9.400/Chart.yaml b/charts/rancher-logging/rancher-logging-crd/3.9.400/Chart.yaml new file mode 100755 index 000000000..012115ac4 --- /dev/null +++ b/charts/rancher-logging/rancher-logging-crd/3.9.400/Chart.yaml @@ -0,0 +1,10 @@ +annotations: + catalog.cattle.io/certified: rancher + catalog.cattle.io/hidden: "true" + catalog.cattle.io/namespace: cattle-logging-system + catalog.cattle.io/release-name: rancher-logging-crd +apiVersion: v1 +description: Installs the CRDs for rancher-logging. +name: rancher-logging-crd +type: application +version: 3.9.400 diff --git a/charts/rancher-logging/rancher-logging-crd/3.9.400/README.md b/charts/rancher-logging/rancher-logging-crd/3.9.400/README.md new file mode 100755 index 000000000..d4beb54fa --- /dev/null +++ b/charts/rancher-logging/rancher-logging-crd/3.9.400/README.md @@ -0,0 +1,2 @@ +# rancher-logging-crd +A Rancher chart that installs the CRDs used by rancher-logging. 
diff --git a/charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_clusterflows.yaml b/charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_clusterflows.yaml new file mode 100755 index 000000000..9fc6e22a5 --- /dev/null +++ b/charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_clusterflows.yaml @@ -0,0 +1,765 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.4 + creationTimestamp: null + name: clusterflows.logging.banzaicloud.io +spec: + additionalPrinterColumns: + - JSONPath: .status.active + description: Is the flow active? + name: Active + type: boolean + - JSONPath: .status.problemsCount + description: Number of problems + name: Problems + type: integer + group: logging.banzaicloud.io + names: + categories: + - logging-all + kind: ClusterFlow + listKind: ClusterFlowList + plural: clusterflows + singular: clusterflow + preserveUnknownFields: false + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + filters: + items: + properties: + concat: + properties: + continuous_line_regexp: + type: string + flush_interval: + type: integer + keep_partial_key: + type: boolean + keep_partial_metadata: + type: string + key: + type: string + multiline_end_regexp: + type: string + multiline_start_regexp: + type: string + n_lines: + type: integer + partial_key: + type: string + partial_value: + type: string + separator: + type: string + stream_identity_key: + type: string + timeout_label: + type: string + use_first_timestamp: + type: boolean + use_partial_metadata: + type: string + type: object + dedot: + properties: + de_dot_nested: + type: boolean + de_dot_separator: + type: string + type: object + detectExceptions: + properties: + 
languages: + items: + type: string + type: array + max_bytes: + type: integer + max_lines: + type: integer + message: + type: string + multiline_flush_interval: + type: string + remove_tag_prefix: + type: string + stream: + type: string + type: object + enhanceK8s: + properties: + api_groups: + items: + type: string + type: array + bearer_token_file: + type: string + ca_file: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + cache_refresh: + type: integer + cache_refresh_variation: + type: integer + cache_size: + type: integer + cache_ttl: + type: integer + client_cert: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + client_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + core_api_versions: + items: + type: string + type: array + data_type: + type: string + in_namespace_path: + items: + type: string + type: array + in_pod_path: + items: + type: string + type: array + kubernetes_url: 
+ type: string + secret_dir: + type: string + ssl_partial_chain: + type: boolean + verify_ssl: + type: boolean + type: object + geoip: + properties: + backend_library: + type: string + geoip_2_database: + type: string + geoip_database: + type: string + geoip_lookup_keys: + type: string + records: + items: + additionalProperties: + type: string + type: object + type: array + skip_adding_null_record: + type: boolean + type: object + grep: + properties: + and: + items: + properties: + exclude: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + regexp: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + type: object + type: array + exclude: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + or: + items: + properties: + exclude: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + regexp: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + type: object + type: array + regexp: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + type: object + parser: + properties: + emit_invalid_record_to_error: + type: boolean + hash_value_field: + type: string + inject_key_prefix: + type: string + key_name: + type: string + parse: + properties: + delimiter: + type: string + delimiter_pattern: + type: string + estimate_current_event: + type: boolean + expression: + type: string + format: + type: string + format_firstline: + type: string + keep_time_key: + type: boolean + label_delimiter: + type: string + local_time: + type: boolean + multiline: + items: + type: string + type: array + null_empty_string: 
+ type: boolean + null_value_pattern: + type: string + patterns: + items: + properties: + estimate_current_event: + type: boolean + expression: + type: string + format: + type: string + keep_time_key: + type: boolean + local_time: + type: boolean + null_empty_string: + type: boolean + null_value_pattern: + type: string + time_format: + type: string + time_key: + type: string + time_type: + type: string + timezone: + type: string + type: + type: string + types: + type: string + utc: + type: boolean + type: object + type: array + time_format: + type: string + time_key: + type: string + time_type: + type: string + timezone: + type: string + type: + type: string + types: + type: string + utc: + type: boolean + type: object + parsers: + items: + properties: + delimiter: + type: string + delimiter_pattern: + type: string + estimate_current_event: + type: boolean + expression: + type: string + format: + type: string + format_firstline: + type: string + keep_time_key: + type: boolean + label_delimiter: + type: string + local_time: + type: boolean + multiline: + items: + type: string + type: array + null_empty_string: + type: boolean + null_value_pattern: + type: string + patterns: + items: + properties: + estimate_current_event: + type: boolean + expression: + type: string + format: + type: string + keep_time_key: + type: boolean + local_time: + type: boolean + null_empty_string: + type: boolean + null_value_pattern: + type: string + time_format: + type: string + time_key: + type: string + time_type: + type: string + timezone: + type: string + type: + type: string + types: + type: string + utc: + type: boolean + type: object + type: array + time_format: + type: string + time_key: + type: string + time_type: + type: string + timezone: + type: string + type: + type: string + types: + type: string + utc: + type: boolean + type: object + type: array + remove_key_name_field: + type: boolean + replace_invalid_sequence: + type: boolean + reserve_data: + type: boolean + 
reserve_time: + type: boolean + type: object + prometheus: + properties: + labels: + additionalProperties: + type: string + type: object + metrics: + items: + properties: + buckets: + type: string + desc: + type: string + key: + type: string + labels: + additionalProperties: + type: string + type: object + name: + type: string + type: + type: string + required: + - desc + - name + - type + type: object + type: array + type: object + record_modifier: + properties: + char_encoding: + type: string + prepare_value: + type: string + records: + items: + additionalProperties: + type: string + type: object + type: array + remove_keys: + type: string + replaces: + items: + properties: + expression: + type: string + key: + type: string + replace: + type: string + required: + - expression + - key + - replace + type: object + type: array + whitelist_keys: + type: string + type: object + record_transformer: + properties: + auto_typecast: + type: boolean + enable_ruby: + type: boolean + keep_keys: + type: string + records: + items: + additionalProperties: + type: string + type: object + type: array + remove_keys: + type: string + renew_record: + type: boolean + renew_time_key: + type: string + type: object + stdout: + properties: + output_type: + type: string + type: object + sumologic: + properties: + collector_key_name: + type: string + collector_value: + type: string + exclude_container_regex: + type: string + exclude_facility_regex: + type: string + exclude_host_regex: + type: string + exclude_namespace_regex: + type: string + exclude_pod_regex: + type: string + exclude_priority_regex: + type: string + exclude_unit_regex: + type: string + log_format: + type: string + source_category: + type: string + source_category_key_name: + type: string + source_category_prefix: + type: string + source_category_replace_dash: + type: string + source_host: + type: string + source_host_key_name: + type: string + source_name: + type: string + source_name_key_name: + type: string + 
tracing_annotation_prefix: + type: string + tracing_container_name: + type: string + tracing_format: + type: boolean + tracing_host: + type: string + tracing_label_prefix: + type: string + tracing_namespace: + type: string + tracing_pod: + type: string + tracing_pod_id: + type: string + type: object + tag_normaliser: + properties: + format: + type: string + type: object + throttle: + properties: + group_bucket_limit: + type: integer + group_bucket_period_s: + type: integer + group_drop_logs: + type: boolean + group_key: + type: string + group_reset_rate_s: + type: integer + group_warning_delay_s: + type: integer + type: object + type: object + type: array + globalOutputRefs: + items: + type: string + type: array + loggingRef: + type: string + match: + items: + properties: + exclude: + properties: + container_names: + items: + type: string + type: array + hosts: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + namespaces: + items: + type: string + type: array + type: object + select: + properties: + container_names: + items: + type: string + type: array + hosts: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + namespaces: + items: + type: string + type: array + type: object + type: object + type: array + outputRefs: + items: + type: string + type: array + selectors: + additionalProperties: + type: string + type: object + type: object + status: + properties: + active: + type: boolean + problems: + items: + type: string + type: array + problemsCount: + type: integer + type: object + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_clusteroutputs.yaml 
b/charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_clusteroutputs.yaml new file mode 100755 index 000000000..029e28ec8 --- /dev/null +++ b/charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_clusteroutputs.yaml @@ -0,0 +1,4721 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.4 + creationTimestamp: null + name: clusteroutputs.logging.banzaicloud.io +spec: + additionalPrinterColumns: + - JSONPath: .status.active + description: Is the output active? + name: Active + type: boolean + - JSONPath: .status.problemsCount + description: Number of problems + name: Problems + type: integer + group: logging.banzaicloud.io + names: + categories: + - logging-all + kind: ClusterOutput + listKind: ClusterOutputList + plural: clusteroutputs + singular: clusteroutput + preserveUnknownFields: false + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + awsElasticsearch: + properties: + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + 
retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + endpoint: + properties: + access_key_id: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + assume_role_arn: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + assume_role_session_name: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + assume_role_web_identity_token_file: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + 
key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + ecs_container_credentials_relative_uri: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + region: + type: string + secret_access_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + sts_credentials_region: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + url: + type: string + type: object + flush_interval: + type: string + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + include_tag_key: + type: boolean + logstash_format: + type: boolean + tag_key: + type: string + type: object + azurestorage: + properties: + auto_create_container: + type: boolean + azure_container: + type: string + 
azure_imds_api_version: + type: string + azure_object_key_format: + type: string + azure_storage_access_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + azure_storage_account: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + azure_storage_sas_token: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: 
integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + format: + type: string + path: + type: string + required: + - azure_container + - azure_storage_access_key + - azure_storage_account + - azure_storage_sas_token + type: object + cloudwatch: + properties: + auto_create_stream: + type: boolean + aws_instance_profile_credentials_retries: + type: integer + aws_key_id: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + aws_sec_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + aws_sts_role_arn: + type: string + aws_sts_session_name: + type: string + aws_use_sts: + type: boolean + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + 
delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + concurrency: + type: integer + endpoint: + type: string + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + http_proxy: + type: string + include_time_key: + type: boolean + json_handler: + type: string + localtime: + type: boolean + log_group_aws_tags: + type: string + log_group_aws_tags_key: + type: string + log_group_name: + type: string + log_group_name_key: + type: string + log_rejected_request: + type: string + log_stream_name: + type: string + log_stream_name_key: + type: string + max_events_per_batch: + type: integer + max_message_length: + type: integer + message_keys: + type: string + put_log_events_disable_retry_limit: + type: boolean + put_log_events_retry_limit: + type: integer + put_log_events_retry_wait: + type: string + region: + type: string + remove_log_group_aws_tags_key: + type: string + remove_log_group_name_key: + type: 
string + remove_log_stream_name_key: + type: string + remove_retention_in_days: + type: string + retention_in_days: + type: string + retention_in_days_key: + type: string + use_tag_as_group: + type: boolean + use_tag_as_stream: + type: boolean + required: + - region + type: object + datadog: + properties: + api_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + compression_level: + type: string + dd_hostname: + type: string + dd_source: + type: string + dd_sourcecategory: + 
type: string + dd_tags: + type: string + host: + type: string + include_tag_key: + type: boolean + max_backoff: + type: string + max_retries: + type: string + no_ssl_validation: + type: boolean + port: + type: string + service: + type: string + ssl_port: + type: string + tag_key: + type: string + timestamp_key: + type: string + use_compression: + type: boolean + use_http: + type: boolean + use_json: + type: boolean + use_ssl: + type: boolean + required: + - api_key + type: object + elasticsearch: + properties: + application_name: + type: string + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + bulk_message_request_threshold: + type: string + ca_file: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + 
properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + client_cert: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + client_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + client_key_pass: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + content_type: + type: string + custom_headers: + type: string + customize_template: + type: string + default_elasticsearch_version: + type: string + deflector_alias: + type: string + enable_ilm: + type: boolean + exception_backup: + type: boolean + fail_on_putting_template_retry_exceed: + type: boolean + flatten_hashes: + type: boolean + flatten_hashes_separator: + type: string + host: + type: string + hosts: + type: string + http_backend: + type: string + id_key: + type: string + ignore_exceptions: + type: string + 
ilm_policy: + type: string + ilm_policy_id: + type: string + ilm_policy_overwrite: + type: boolean + include_index_in_url: + type: boolean + include_tag_key: + type: boolean + include_timestamp: + type: boolean + index_date_pattern: + type: string + index_name: + type: string + index_prefix: + type: string + log_es_400_reason: + type: boolean + logstash_dateformat: + type: string + logstash_format: + type: boolean + logstash_prefix: + type: string + logstash_prefix_separator: + type: string + max_retry_get_es_version: + type: string + max_retry_putting_template: + type: string + password: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + path: + type: string + pipeline: + type: string + port: + type: integer + prefer_oj_serializer: + type: boolean + reconnect_on_error: + type: boolean + reload_after: + type: string + reload_connections: + type: boolean + reload_on_failure: + type: boolean + remove_keys_on_update: + type: string + remove_keys_on_update_key: + type: string + request_timeout: + type: string + resurrect_after: + type: string + retry_tag: + type: string + rollover_index: + type: boolean + routing_key: + type: string + scheme: + type: string + sniffer_class_name: + type: string + ssl_max_version: + type: string + ssl_min_version: + type: string + ssl_verify: + type: boolean + ssl_version: + type: string + suppress_doc_wrap: + type: boolean + suppress_type_name: + type: boolean + tag_key: + type: string + target_index_key: + type: string + target_type_key: + type: string + template_file: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: 
string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + template_name: + type: string + template_overwrite: + type: boolean + templates: + type: string + time_key: + type: string + time_key_format: + type: string + time_parse_error_tag: + type: string + time_precision: + type: string + type_name: + type: string + unrecoverable_error_types: + type: string + user: + type: string + utc_index: + type: boolean + validate_client_version: + type: boolean + verify_es_version_at_startup: + type: boolean + with_transporter_log: + type: boolean + write_operation: + type: string + type: object + enabledNamespaces: + items: + type: string + type: array + file: + properties: + add_path_suffix: + type: boolean + append: + type: boolean + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + 
timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + path: + type: string + path_suffix: + type: string + symlink_path: + type: boolean + required: + - path + type: object + forward: + properties: + ack_response_timeout: + type: integer + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + connect_timeout: + type: integer + dns_round_robin: + type: boolean + expire_dns_cache: + type: integer + hard_timeout: + type: integer + heartbeat_interval: + type: integer + heartbeat_type: + type: string + ignore_network_errors_at_startup: + type: boolean + keepalive: + type: boolean + keepalive_timeout: + type: 
integer + phi_failure_detector: + type: boolean + phi_threshold: + type: integer + recover_wait: + type: integer + require_ack_response: + type: boolean + security: + properties: + allow_anonymous_source: + type: boolean + self_hostname: + type: string + shared_key: + type: string + user_auth: + type: boolean + required: + - self_hostname + - shared_key + type: object + send_timeout: + type: integer + servers: + items: + properties: + host: + type: string + name: + type: string + password: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + port: + type: integer + shared_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + standby: + type: boolean + username: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + weight: + type: integer + required: + - host + type: object + type: array + tls_allow_self_signed_cert: + type: boolean + tls_cert_logical_store_name: + type: string + tls_cert_path: + 
properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + tls_cert_thumbprint: + type: string + tls_cert_use_enterprise_store: + type: boolean + tls_ciphers: + type: string + tls_client_cert_path: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + tls_client_private_key_passphrase: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + tls_client_private_key_path: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + tls_insecure_mode: + type: boolean + tls_verify_hostname: + type: boolean + tls_version: + type: string + verify_connection_at_startup: + 
type: boolean + required: + - servers + type: object + gcs: + properties: + acl: + type: string + auto_create_bucket: + type: boolean + bucket: + type: string + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + client_retries: + type: integer + client_timeout: + type: integer + credentials_json: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + encryption_key: + type: string + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - 
hash + - single_value + type: string + type: object + hex_random_length: + type: integer + keyfile: + type: string + object_key_format: + type: string + object_metadata: + items: + properties: + key: + type: string + value: + type: string + required: + - key + - value + type: object + type: array + overwrite: + type: boolean + path: + type: string + project: + type: string + storage_class: + type: string + store_as: + type: string + transcoding: + type: boolean + required: + - bucket + - project + type: object + gelf: + properties: + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + host: + type: string + port: + type: integer + protocol: + type: string + tls: + type: boolean + tls_options: + additionalProperties: + type: string + type: object + required: + - host + - port + type: object + http: + properties: + auth: + properties: + password: + properties: + mountFrom: + properties: + 
secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + username: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + required: + - password + - username + type: object + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + 
content_type: + type: string + endpoint: + type: string + error_response_as_unrecoverable: + type: boolean + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + headers: + additionalProperties: + type: string + type: object + http_method: + type: string + json_array: + type: boolean + open_timeout: + type: integer + proxy: + type: string + read_timeout: + type: integer + retryable_response_codes: + items: + type: integer + type: array + ssl_timeout: + type: integer + tls_ca_cert_path: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + tls_ciphers: + type: string + tls_client_cert_path: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + tls_private_key_passphrase: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + tls_private_key_path: + 
properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + tls_verify_mode: + type: string + tls_version: + type: string + required: + - endpoint + type: object + kafka: + properties: + ack_timeout: + type: integer + brokers: + type: string + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + compression_codec: + type: string + default_message_key: + type: string + default_partition_key: + type: string + default_topic: + type: string + exclude_partion_key: + type: boolean + exclude_topic_key: + type: boolean + format: + properties: + 
add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + get_kafka_client_log: + type: boolean + headers: + additionalProperties: + type: string + type: object + headers_from_record: + additionalProperties: + type: string + type: object + idempotent: + type: boolean + max_send_retries: + type: integer + message_key_key: + type: string + partition_key: + type: string + partition_key_key: + type: string + password: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + required_acks: + type: integer + sasl_over_ssl: + type: boolean + scram_mechanism: + type: string + ssl_ca_cert: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + ssl_ca_certs_from_system: + type: boolean + ssl_client_cert: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + ssl_client_cert_chain: + properties: 
+ mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + ssl_client_cert_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + ssl_verify_hostname: + type: boolean + topic_key: + type: string + use_default_for_unknown_topic: + type: boolean + username: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + required: + - brokers + - format + type: object + kinesisStream: + properties: + assume_role_credentials: + properties: + duration_seconds: + type: string + external_id: + type: string + policy: + type: string + role_arn: + type: string + role_session_name: + type: string + required: + - role_arn + - role_session_name + type: object + aws_iam_retries: + type: integer + aws_key_id: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + 
properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + aws_sec_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + aws_ses_token: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + batch_request_max_count: + type: integer + batch_request_max_size: + type: integer + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + 
retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + partition_key: + type: string + process_credentials: + properties: + process: + type: string + required: + - process + type: object + region: + type: string + reset_backoff_if_success: + type: boolean + retries_on_batch_request: + type: integer + stream_name: + type: string + required: + - stream_name + type: object + logdna: + properties: + api_key: + type: string + app: + type: string + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + 
type: string + type: object + hostname: + type: string + ingester_domain: + type: string + ingester_endpoint: + type: string + request_timeout: + type: string + tags: + type: string + required: + - api_key + - hostname + type: object + loggingRef: + type: string + logz: + properties: + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + endpoint: + properties: + port: + type: integer + token: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + url: + type: string + type: object + gzip: + type: boolean + http_idle_timeout: + type: 
integer + output_include_tags: + type: boolean + output_include_time: + type: boolean + retry_count: + type: integer + retry_sleep: + type: integer + required: + - endpoint + type: object + loki: + properties: + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + ca_cert: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + cert: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + 
type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + configure_kubernetes_labels: + type: boolean + drop_single_key: + type: boolean + extra_labels: + additionalProperties: + type: string + type: object + extract_kubernetes_labels: + type: boolean + insecure_tls: + type: boolean + key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + labels: + additionalProperties: + type: string + type: object + line_format: + type: string + password: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + remove_keys: + items: + type: string + type: array + tenant: + type: string + url: + type: string + username: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + type: object + newrelic: + properties: + api_key: + properties: + mountFrom: + properties: + 
secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + base_uri: + type: string + license_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + type: object + nullout: + type: object + oss: + properties: + aaccess_key_secret: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + access_key_id: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + auto_create_bucket: + type: boolean + bucket: + type: string + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + 
delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + check_bucket: + type: boolean + check_object: + type: boolean + download_crc_enable: + type: boolean + endpoint: + type: string + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + hex_random_length: + type: integer + index_format: + type: string + key_format: + type: string + open_timeout: + type: integer + oss_sdk_log_dir: + type: string + overwrite: + type: boolean + path: + type: string + read_timeout: + type: integer + store_as: + type: string + upload_crc_enable: + type: boolean + warn_for_delay: + type: string + required: + - aaccess_key_secret + - access_key_id + - bucket + - endpoint + type: object + redis: + properties: + allow_duplicate_key: + type: boolean + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + 
delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + db_number: + type: integer + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + host: + type: string + insert_key_prefix: + type: string + password: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + port: + type: integer + strftime_format: + type: string + ttl: + type: integer + type: object + s3: + properties: + acl: + type: string + assume_role_credentials: + properties: + duration_seconds: + type: string + external_id: + type: string + policy: + type: string + role_arn: + type: string 
+ role_session_name: + type: string + required: + - role_arn + - role_session_name + type: object + auto_create_bucket: + type: string + aws_iam_retries: + type: string + aws_key_id: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + aws_sec_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: 
string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + check_apikey_on_start: + type: string + check_bucket: + type: string + check_object: + type: string + clustername: + type: string + compute_checksums: + type: string + enable_transfer_acceleration: + type: string + force_path_style: + type: string + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + grant_full_control: + type: string + grant_read: + type: string + grant_read_acp: + type: string + grant_write_acp: + type: string + hex_random_length: + type: string + index_format: + type: string + instance_profile_credentials: + properties: + http_open_timeout: + type: string + http_read_timeout: + type: string + ip_address: + type: string + port: + type: string + retries: + type: string + type: object + oneeye_format: + type: boolean + overwrite: + type: string + path: + type: string + proxy_uri: + type: string + s3_bucket: + type: string + s3_endpoint: + type: string + s3_metadata: + type: string + s3_object_key_format: + type: string + s3_region: + type: string + shared_credentials: + properties: + path: + type: string + profile_name: + type: string + type: object + signature_version: + type: string + sse_customer_algorithm: + type: string + sse_customer_key: + type: string + sse_customer_key_md5: + type: string + ssekms_key_id: + type: string + ssl_verify_peer: + type: string + storage_class: + type: string + store_as: + type: string + use_bundled_cert: + type: string + use_server_side_encryption: + type: string + warn_for_delay: + type: string + required: + - s3_bucket + type: object + splunkHec: + properties: + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + 
type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + ca_file: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + ca_path: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + client_cert: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: 
string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + client_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + coerce_to_utf8: + type: boolean + data_type: + type: string + fields: + additionalProperties: + type: string + type: object + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + hec_host: + type: string + hec_port: + type: integer + hec_token: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + host: + type: string + host_key: + type: string + idle_timeout: + type: integer + index: + type: string + index_key: + type: string + insecure_ssl: + type: boolean + keep_keys: + type: boolean + metric_name_key: + type: string + metric_value_key: + type: string + metrics_from_event: + type: boolean + non_utf8_replacement_string: + type: string + open_timeout: + type: integer + protocol: + type: string + read_timeout: + type: integer + source: + 
type: string + source_key: + type: string + sourcetype: + type: string + sourcetype_key: + type: string + ssl_ciphers: + type: string + required: + - hec_host + - hec_token + type: object + sumologic: + properties: + add_timestamp: + type: boolean + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + compress: + type: boolean + compress_encoding: + type: string + custom_dimensions: + type: string + custom_fields: + items: + type: string + type: array + data_type: + type: string + delimiter: + type: string + disable_cookies: + type: boolean + endpoint: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: 
boolean + required: + - key + type: object + type: object + type: object + log_format: + type: string + log_key: + type: string + metric_data_format: + type: string + open_timeout: + type: integer + proxy_uri: + type: string + source_category: + type: string + source_host: + type: string + source_name: + type: string + source_name_key: + type: string + sumo_client: + type: string + timestamp_key: + type: string + verify_ssl: + type: boolean + required: + - endpoint + - source_name + type: object + syslog: + properties: + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + format: + properties: + app_name_field: + type: string + hostname_field: + type: string + log_field: + type: string + message_id_field: + type: string + proc_id_field: + type: string + rfc6587_message_size: + type: boolean + structured_data_field: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - 
msgpack + - hash + - single_value + type: string + type: object + host: + type: string + insecure: + type: boolean + port: + type: integer + transport: + type: string + trusted_ca_path: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + required: + - host + type: object + type: object + status: + properties: + active: + type: boolean + problems: + items: + type: string + type: array + problemsCount: + type: integer + type: object + required: + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_flows.yaml b/charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_flows.yaml new file mode 100755 index 000000000..a01a1331d --- /dev/null +++ b/charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_flows.yaml @@ -0,0 +1,761 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.4 + creationTimestamp: null + name: flows.logging.banzaicloud.io +spec: + additionalPrinterColumns: + - JSONPath: .status.active + description: Is the flow active? 
+ name: Active + type: boolean + - JSONPath: .status.problemsCount + description: Number of problems + name: Problems + type: integer + group: logging.banzaicloud.io + names: + categories: + - logging-all + kind: Flow + listKind: FlowList + plural: flows + singular: flow + preserveUnknownFields: false + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + filters: + items: + properties: + concat: + properties: + continuous_line_regexp: + type: string + flush_interval: + type: integer + keep_partial_key: + type: boolean + keep_partial_metadata: + type: string + key: + type: string + multiline_end_regexp: + type: string + multiline_start_regexp: + type: string + n_lines: + type: integer + partial_key: + type: string + partial_value: + type: string + separator: + type: string + stream_identity_key: + type: string + timeout_label: + type: string + use_first_timestamp: + type: boolean + use_partial_metadata: + type: string + type: object + dedot: + properties: + de_dot_nested: + type: boolean + de_dot_separator: + type: string + type: object + detectExceptions: + properties: + languages: + items: + type: string + type: array + max_bytes: + type: integer + max_lines: + type: integer + message: + type: string + multiline_flush_interval: + type: string + remove_tag_prefix: + type: string + stream: + type: string + type: object + enhanceK8s: + properties: + api_groups: + items: + type: string + type: array + bearer_token_file: + type: string + ca_file: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + 
type: object + type: object + cache_refresh: + type: integer + cache_refresh_variation: + type: integer + cache_size: + type: integer + cache_ttl: + type: integer + client_cert: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + client_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + core_api_versions: + items: + type: string + type: array + data_type: + type: string + in_namespace_path: + items: + type: string + type: array + in_pod_path: + items: + type: string + type: array + kubernetes_url: + type: string + secret_dir: + type: string + ssl_partial_chain: + type: boolean + verify_ssl: + type: boolean + type: object + geoip: + properties: + backend_library: + type: string + geoip_2_database: + type: string + geoip_database: + type: string + geoip_lookup_keys: + type: string + records: + items: + additionalProperties: + type: string + type: object + type: array + skip_adding_null_record: + type: boolean + type: object + grep: + properties: + and: + items: + properties: + exclude: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + regexp: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: 
array + type: object + type: array + exclude: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + or: + items: + properties: + exclude: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + regexp: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + type: object + type: array + regexp: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + type: object + parser: + properties: + emit_invalid_record_to_error: + type: boolean + hash_value_field: + type: string + inject_key_prefix: + type: string + key_name: + type: string + parse: + properties: + delimiter: + type: string + delimiter_pattern: + type: string + estimate_current_event: + type: boolean + expression: + type: string + format: + type: string + format_firstline: + type: string + keep_time_key: + type: boolean + label_delimiter: + type: string + local_time: + type: boolean + multiline: + items: + type: string + type: array + null_empty_string: + type: boolean + null_value_pattern: + type: string + patterns: + items: + properties: + estimate_current_event: + type: boolean + expression: + type: string + format: + type: string + keep_time_key: + type: boolean + local_time: + type: boolean + null_empty_string: + type: boolean + null_value_pattern: + type: string + time_format: + type: string + time_key: + type: string + time_type: + type: string + timezone: + type: string + type: + type: string + types: + type: string + utc: + type: boolean + type: object + type: array + time_format: + type: string + time_key: + type: string + time_type: + type: string + timezone: + type: string + type: + type: string + types: + type: string + utc: + type: boolean + type: object + parsers: + items: + properties: + 
delimiter: + type: string + delimiter_pattern: + type: string + estimate_current_event: + type: boolean + expression: + type: string + format: + type: string + format_firstline: + type: string + keep_time_key: + type: boolean + label_delimiter: + type: string + local_time: + type: boolean + multiline: + items: + type: string + type: array + null_empty_string: + type: boolean + null_value_pattern: + type: string + patterns: + items: + properties: + estimate_current_event: + type: boolean + expression: + type: string + format: + type: string + keep_time_key: + type: boolean + local_time: + type: boolean + null_empty_string: + type: boolean + null_value_pattern: + type: string + time_format: + type: string + time_key: + type: string + time_type: + type: string + timezone: + type: string + type: + type: string + types: + type: string + utc: + type: boolean + type: object + type: array + time_format: + type: string + time_key: + type: string + time_type: + type: string + timezone: + type: string + type: + type: string + types: + type: string + utc: + type: boolean + type: object + type: array + remove_key_name_field: + type: boolean + replace_invalid_sequence: + type: boolean + reserve_data: + type: boolean + reserve_time: + type: boolean + type: object + prometheus: + properties: + labels: + additionalProperties: + type: string + type: object + metrics: + items: + properties: + buckets: + type: string + desc: + type: string + key: + type: string + labels: + additionalProperties: + type: string + type: object + name: + type: string + type: + type: string + required: + - desc + - name + - type + type: object + type: array + type: object + record_modifier: + properties: + char_encoding: + type: string + prepare_value: + type: string + records: + items: + additionalProperties: + type: string + type: object + type: array + remove_keys: + type: string + replaces: + items: + properties: + expression: + type: string + key: + type: string + replace: + type: string + required: + 
- expression + - key + - replace + type: object + type: array + whitelist_keys: + type: string + type: object + record_transformer: + properties: + auto_typecast: + type: boolean + enable_ruby: + type: boolean + keep_keys: + type: string + records: + items: + additionalProperties: + type: string + type: object + type: array + remove_keys: + type: string + renew_record: + type: boolean + renew_time_key: + type: string + type: object + stdout: + properties: + output_type: + type: string + type: object + sumologic: + properties: + collector_key_name: + type: string + collector_value: + type: string + exclude_container_regex: + type: string + exclude_facility_regex: + type: string + exclude_host_regex: + type: string + exclude_namespace_regex: + type: string + exclude_pod_regex: + type: string + exclude_priority_regex: + type: string + exclude_unit_regex: + type: string + log_format: + type: string + source_category: + type: string + source_category_key_name: + type: string + source_category_prefix: + type: string + source_category_replace_dash: + type: string + source_host: + type: string + source_host_key_name: + type: string + source_name: + type: string + source_name_key_name: + type: string + tracing_annotation_prefix: + type: string + tracing_container_name: + type: string + tracing_format: + type: boolean + tracing_host: + type: string + tracing_label_prefix: + type: string + tracing_namespace: + type: string + tracing_pod: + type: string + tracing_pod_id: + type: string + type: object + tag_normaliser: + properties: + format: + type: string + type: object + throttle: + properties: + group_bucket_limit: + type: integer + group_bucket_period_s: + type: integer + group_drop_logs: + type: boolean + group_key: + type: string + group_reset_rate_s: + type: integer + group_warning_delay_s: + type: integer + type: object + type: object + type: array + globalOutputRefs: + items: + type: string + type: array + localOutputRefs: + items: + type: string + type: array + 
loggingRef: + type: string + match: + items: + properties: + exclude: + properties: + container_names: + items: + type: string + type: array + hosts: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + type: object + select: + properties: + container_names: + items: + type: string + type: array + hosts: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + type: object + type: object + type: array + outputRefs: + items: + type: string + type: array + selectors: + additionalProperties: + type: string + type: object + type: object + status: + properties: + active: + type: boolean + problems: + items: + type: string + type: array + problemsCount: + type: integer + type: object + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_loggings.yaml b/charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_loggings.yaml new file mode 100755 index 000000000..9d9c20fa1 --- /dev/null +++ b/charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_loggings.yaml @@ -0,0 +1,7095 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.4 + creationTimestamp: null + name: loggings.logging.banzaicloud.io +spec: + group: logging.banzaicloud.io + names: + categories: + - logging-all + kind: Logging + listKind: LoggingList + plural: loggings + singular: logging + preserveUnknownFields: false + scope: Cluster + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + 
allowClusterResourcesFromAllNamespaces: + type: boolean + controlNamespace: + type: string + defaultFlow: + properties: + filters: + items: + properties: + concat: + properties: + continuous_line_regexp: + type: string + flush_interval: + type: integer + keep_partial_key: + type: boolean + keep_partial_metadata: + type: string + key: + type: string + multiline_end_regexp: + type: string + multiline_start_regexp: + type: string + n_lines: + type: integer + partial_key: + type: string + partial_value: + type: string + separator: + type: string + stream_identity_key: + type: string + timeout_label: + type: string + use_first_timestamp: + type: boolean + use_partial_metadata: + type: string + type: object + dedot: + properties: + de_dot_nested: + type: boolean + de_dot_separator: + type: string + type: object + detectExceptions: + properties: + languages: + items: + type: string + type: array + max_bytes: + type: integer + max_lines: + type: integer + message: + type: string + multiline_flush_interval: + type: string + remove_tag_prefix: + type: string + stream: + type: string + type: object + enhanceK8s: + properties: + api_groups: + items: + type: string + type: array + bearer_token_file: + type: string + ca_file: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + cache_refresh: + type: integer + cache_refresh_variation: + type: integer + cache_size: + type: integer + cache_ttl: + type: integer + client_cert: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + 
type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + client_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + core_api_versions: + items: + type: string + type: array + data_type: + type: string + in_namespace_path: + items: + type: string + type: array + in_pod_path: + items: + type: string + type: array + kubernetes_url: + type: string + secret_dir: + type: string + ssl_partial_chain: + type: boolean + verify_ssl: + type: boolean + type: object + geoip: + properties: + backend_library: + type: string + geoip_2_database: + type: string + geoip_database: + type: string + geoip_lookup_keys: + type: string + records: + items: + additionalProperties: + type: string + type: object + type: array + skip_adding_null_record: + type: boolean + type: object + grep: + properties: + and: + items: + properties: + exclude: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + regexp: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + type: object + type: array + exclude: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + or: + items: + properties: + exclude: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + regexp: + items: + properties: 
+ key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + type: object + type: array + regexp: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + type: object + parser: + properties: + emit_invalid_record_to_error: + type: boolean + hash_value_field: + type: string + inject_key_prefix: + type: string + key_name: + type: string + parse: + properties: + delimiter: + type: string + delimiter_pattern: + type: string + estimate_current_event: + type: boolean + expression: + type: string + format: + type: string + format_firstline: + type: string + keep_time_key: + type: boolean + label_delimiter: + type: string + local_time: + type: boolean + multiline: + items: + type: string + type: array + null_empty_string: + type: boolean + null_value_pattern: + type: string + patterns: + items: + properties: + estimate_current_event: + type: boolean + expression: + type: string + format: + type: string + keep_time_key: + type: boolean + local_time: + type: boolean + null_empty_string: + type: boolean + null_value_pattern: + type: string + time_format: + type: string + time_key: + type: string + time_type: + type: string + timezone: + type: string + type: + type: string + types: + type: string + utc: + type: boolean + type: object + type: array + time_format: + type: string + time_key: + type: string + time_type: + type: string + timezone: + type: string + type: + type: string + types: + type: string + utc: + type: boolean + type: object + parsers: + items: + properties: + delimiter: + type: string + delimiter_pattern: + type: string + estimate_current_event: + type: boolean + expression: + type: string + format: + type: string + format_firstline: + type: string + keep_time_key: + type: boolean + label_delimiter: + type: string + local_time: + type: boolean + multiline: + items: + type: string + type: array + null_empty_string: + type: boolean + 
null_value_pattern: + type: string + patterns: + items: + properties: + estimate_current_event: + type: boolean + expression: + type: string + format: + type: string + keep_time_key: + type: boolean + local_time: + type: boolean + null_empty_string: + type: boolean + null_value_pattern: + type: string + time_format: + type: string + time_key: + type: string + time_type: + type: string + timezone: + type: string + type: + type: string + types: + type: string + utc: + type: boolean + type: object + type: array + time_format: + type: string + time_key: + type: string + time_type: + type: string + timezone: + type: string + type: + type: string + types: + type: string + utc: + type: boolean + type: object + type: array + remove_key_name_field: + type: boolean + replace_invalid_sequence: + type: boolean + reserve_data: + type: boolean + reserve_time: + type: boolean + type: object + prometheus: + properties: + labels: + additionalProperties: + type: string + type: object + metrics: + items: + properties: + buckets: + type: string + desc: + type: string + key: + type: string + labels: + additionalProperties: + type: string + type: object + name: + type: string + type: + type: string + required: + - desc + - name + - type + type: object + type: array + type: object + record_modifier: + properties: + char_encoding: + type: string + prepare_value: + type: string + records: + items: + additionalProperties: + type: string + type: object + type: array + remove_keys: + type: string + replaces: + items: + properties: + expression: + type: string + key: + type: string + replace: + type: string + required: + - expression + - key + - replace + type: object + type: array + whitelist_keys: + type: string + type: object + record_transformer: + properties: + auto_typecast: + type: boolean + enable_ruby: + type: boolean + keep_keys: + type: string + records: + items: + additionalProperties: + type: string + type: object + type: array + remove_keys: + type: string + renew_record: + type: 
boolean + renew_time_key: + type: string + type: object + stdout: + properties: + output_type: + type: string + type: object + sumologic: + properties: + collector_key_name: + type: string + collector_value: + type: string + exclude_container_regex: + type: string + exclude_facility_regex: + type: string + exclude_host_regex: + type: string + exclude_namespace_regex: + type: string + exclude_pod_regex: + type: string + exclude_priority_regex: + type: string + exclude_unit_regex: + type: string + log_format: + type: string + source_category: + type: string + source_category_key_name: + type: string + source_category_prefix: + type: string + source_category_replace_dash: + type: string + source_host: + type: string + source_host_key_name: + type: string + source_name: + type: string + source_name_key_name: + type: string + tracing_annotation_prefix: + type: string + tracing_container_name: + type: string + tracing_format: + type: boolean + tracing_host: + type: string + tracing_label_prefix: + type: string + tracing_namespace: + type: string + tracing_pod: + type: string + tracing_pod_id: + type: string + type: object + tag_normaliser: + properties: + format: + type: string + type: object + throttle: + properties: + group_bucket_limit: + type: integer + group_bucket_period_s: + type: integer + group_drop_logs: + type: boolean + group_key: + type: string + group_reset_rate_s: + type: integer + group_warning_delay_s: + type: integer + type: object + type: object + type: array + globalOutputRefs: + items: + type: string + type: array + outputRefs: + items: + type: string + type: array + type: object + enableRecreateWorkloadOnImmutableFieldChange: + type: boolean + flowConfigCheckDisabled: + type: boolean + flowConfigOverride: + type: string + fluentbit: + properties: + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + 
key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string 
+ type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + annotations: + additionalProperties: + type: string + type: object + bufferStorage: + properties: + storage.backlog.mem_limit: + type: string + storage.checksum: + type: string + storage.path: + type: string + storage.sync: + type: string + type: object + bufferStorageVolume: + properties: + emptyDir: + properties: + medium: + type: string + sizeLimit: + type: string + type: object + host_path: + properties: + path: + type: 
string + type: + type: string + required: + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + pvc: + properties: + source: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + type: string + type: object + requests: + additionalProperties: + type: string + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + storageClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + type: object + type: object + coroStackSize: + format: int32 + type: integer + customConfigSecret: + type: string + disableKubernetesFilter: + type: boolean + enableUpstream: + type: boolean + extraVolumeMounts: + items: + properties: + destination: + pattern: ^/.+$ + type: string + readOnly: + type: boolean + source: + pattern: ^/.+$ + type: string + required: + - destination + - source + type: object + type: array + filterAws: + properties: + Match: + type: string + account_id: + type: boolean + ami_id: + type: boolean + az: + type: boolean + ec2_instance_id: + type: boolean + ec2_instance_type: + type: boolean + hostname: + type: boolean + imds_version: + type: string + private_ip: + type: boolean + vpc_id: + type: boolean + type: object + filterKubernetes: + properties: + Annotations: + type: string + Buffer_Size: + type: string + Dummy_Meta: + type: 
string + K8S-Logging.Exclude: + type: string + K8S-Logging.Parser: + type: string + Keep_Log: + type: string + Kube_CA_File: + type: string + Kube_CA_Path: + type: string + Kube_Tag_Prefix: + type: string + Kube_Token_File: + type: string + Kube_URL: + type: string + Kube_meta_preload_cache_dir: + type: string + Labels: + type: string + Match: + type: string + Merge_Log: + type: string + Merge_Log_Key: + type: string + Merge_Log_Trim: + type: string + Merge_Parser: + type: string + Regex_Parser: + type: string + Use_Journal: + type: string + tls.debug: + type: string + tls.verify: + type: string + type: object + flush: + format: int32 + type: integer + forwardOptions: + properties: + Require_ack_response: + type: boolean + Retry_Limit: + type: string + Send_options: + type: boolean + Tag: + type: string + Time_as_Integer: + type: boolean + type: object + grace: + format: int32 + type: integer + image: + properties: + imagePullSecrets: + items: + properties: + name: + type: string + type: object + type: array + pullPolicy: + type: string + repository: + type: string + tag: + type: string + type: object + inputTail: + properties: + Buffer_Chunk_Size: + type: string + Buffer_Max_Size: + type: string + DB: + type: string + DB_Sync: + type: string + Docker_Mode: + type: string + Docker_Mode_Flush: + type: string + Exclude_Path: + type: string + Ignore_Older: + type: string + Key: + type: string + Mem_Buf_Limit: + type: string + Multiline: + type: string + Multiline_Flush: + type: string + Parser: + type: string + Parser_Firstline: + type: string + Parser_N: + items: + type: string + type: array + Path: + type: string + Path_Key: + type: string + Refresh_Interval: + type: string + Rotate_Wait: + type: string + Skip_Long_Lines: + type: string + Tag: + type: string + Tag_Regex: + type: string + storage.type: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + livenessDefaultCheck: + type: boolean + livenessProbe: + properties: + 
exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + logLevel: + type: string + metrics: + properties: + interval: + type: string + path: + type: string + port: + format: int32 + type: integer + prometheusAnnotations: + type: boolean + serviceMonitor: + type: boolean + serviceMonitorConfig: + properties: + additionalLabels: + additionalProperties: + type: string + type: object + honorLabels: + type: boolean + metricRelabelings: + items: + properties: + action: + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + type: string + type: array + targetLabel: + type: string + type: object + type: array + relabelings: + items: + properties: + action: + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + type: string + type: array + targetLabel: + type: string + type: object + type: array + type: object + timeout: + type: string + type: object + mountPath: + type: string + network: + properties: + connectTimeout: + format: 
int32 + type: integer + keepalive: + type: boolean + keepaliveIdleTimeout: + format: int32 + type: integer + keepaliveMaxRecycle: + format: int32 + type: integer + type: object + nodeSelector: + additionalProperties: + type: string + type: object + parser: + type: string + podPriorityClassName: + type: string + position_db: + properties: + emptyDir: + properties: + medium: + type: string + sizeLimit: + type: string + type: object + host_path: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + pvc: + properties: + source: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + type: string + type: object + requests: + additionalProperties: + type: string + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + storageClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + type: object + type: object + positiondb: + properties: + emptyDir: + properties: + medium: + type: string + sizeLimit: + type: string + type: object + host_path: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + pvc: + properties: + 
source: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + type: string + type: object + requests: + additionalProperties: + type: string + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + storageClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + type: object + type: object + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + properties: + limits: + additionalProperties: + type: string + type: object + requests: + 
additionalProperties: + type: string + type: object + type: object + security: + properties: + podSecurityContext: + properties: + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + podSecurityPolicyCreate: + type: boolean + roleBasedAccessControlCreate: + type: boolean + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + serviceAccount: + type: string + type: 
object + targetHost: + type: string + targetPort: + format: int32 + type: integer + tls: + properties: + enabled: + type: boolean + secretName: + type: string + sharedKey: + type: string + required: + - enabled + type: object + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + type: object + fluentd: + properties: + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + 
operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: 
array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + annotations: + additionalProperties: + type: string + type: object + bufferStorageVolume: + properties: + emptyDir: + properties: + medium: + type: string + sizeLimit: + type: string + type: object + host_path: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + pvc: + properties: + source: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + type: string + type: object + requests: + additionalProperties: + type: string + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + storageClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + type: object + type: object + configCheckAnnotations: + additionalProperties: + type: string + type: object + configReloaderImage: + properties: + imagePullSecrets: + items: + properties: + name: + type: string + type: object + type: array + pullPolicy: + type: string + repository: + type: string + tag: + type: string + type: object + disablePvc: + type: boolean + 
fluentLogDestination: + type: string + fluentOutLogrotate: + properties: + age: + type: string + enabled: + type: boolean + path: + type: string + size: + type: string + required: + - enabled + type: object + fluentdPvcSpec: + properties: + emptyDir: + properties: + medium: + type: string + sizeLimit: + type: string + type: object + host_path: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + pvc: + properties: + source: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + type: string + type: object + requests: + additionalProperties: + type: string + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + storageClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + type: object + type: object + forwardInputConfig: + properties: + add_tag_prefix: + type: string + bind: + type: string + chunk_size_limit: + type: string + chunk_size_warn_limit: + type: string + deny_keepalive: + type: boolean + linger_timeout: + type: integer + port: + type: string + resolve_hostname: + type: boolean + security: + properties: + allow_anonymous_source: + type: boolean + self_hostname: + type: string + shared_key: + type: string + user_auth: + type: boolean + required: 
+ - self_hostname + - shared_key + type: object + send_keepalive_packet: + type: boolean + skip_invalid_event: + type: boolean + source_address_key: + type: string + sourceHostnameKey: + type: string + tag: + type: string + transport: + properties: + ca_cert_path: + type: string + ca_path: + type: string + ca_private_key_passphrase: + type: string + ca_private_key_path: + type: string + cert_path: + type: string + ciphers: + type: string + client_cert_auth: + type: boolean + insecure: + type: boolean + private_key_passphrase: + type: string + private_key_path: + type: string + protocol: + type: string + version: + type: string + type: object + type: object + ignoreRepeatedLogInterval: + type: string + ignoreSameLogInterval: + type: string + image: + properties: + imagePullSecrets: + items: + properties: + name: + type: string + type: object + type: array + pullPolicy: + type: string + repository: + type: string + tag: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + livenessDefaultCheck: + type: boolean + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + 
logLevel: + type: string + metrics: + properties: + interval: + type: string + path: + type: string + port: + format: int32 + type: integer + prometheusAnnotations: + type: boolean + serviceMonitor: + type: boolean + serviceMonitorConfig: + properties: + additionalLabels: + additionalProperties: + type: string + type: object + honorLabels: + type: boolean + metricRelabelings: + items: + properties: + action: + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + type: string + type: array + targetLabel: + type: string + type: object + type: array + relabelings: + items: + properties: + action: + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + type: string + type: array + targetLabel: + type: string + type: object + type: array + type: object + timeout: + type: string + type: object + nodeSelector: + additionalProperties: + type: string + type: object + podPriorityClassName: + type: string + port: + format: int32 + type: integer + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + 
required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + properties: + limits: + additionalProperties: + type: string + type: object + requests: + additionalProperties: + type: string + type: object + type: object + rootDir: + type: string + scaling: + properties: + podManagementPolicy: + type: string + replicas: + type: integer + type: object + security: + properties: + podSecurityContext: + properties: + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + podSecurityPolicyCreate: + type: boolean + roleBasedAccessControlCreate: + type: boolean + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + 
seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + serviceAccount: + type: string + type: object + tls: + properties: + enabled: + type: boolean + secretName: + type: string + sharedKey: + type: string + required: + - enabled + type: object + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + volumeModImage: + properties: + imagePullSecrets: + items: + properties: + name: + type: string + type: object + type: array + pullPolicy: + type: string + repository: + type: string + tag: + type: string + type: object + volumeMountChmod: + type: boolean + workers: + format: int32 + type: integer + type: object + globalFilters: + items: + properties: + concat: + properties: + continuous_line_regexp: + type: string + flush_interval: + type: integer + keep_partial_key: + type: boolean + keep_partial_metadata: + type: string + key: + type: string + multiline_end_regexp: + type: string + multiline_start_regexp: + type: string + n_lines: + type: integer + partial_key: + type: string + partial_value: + type: string + separator: + type: string + stream_identity_key: + type: string + timeout_label: + type: string + use_first_timestamp: + type: boolean + use_partial_metadata: + type: string + type: object + dedot: + properties: + de_dot_nested: + type: boolean + de_dot_separator: + type: string + type: object + detectExceptions: + properties: + languages: + items: + type: string + type: array + max_bytes: + type: integer + max_lines: + type: integer + message: + type: string + multiline_flush_interval: + type: string + remove_tag_prefix: + type: string + stream: + type: string + 
type: object + enhanceK8s: + properties: + api_groups: + items: + type: string + type: array + bearer_token_file: + type: string + ca_file: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + cache_refresh: + type: integer + cache_refresh_variation: + type: integer + cache_size: + type: integer + cache_ttl: + type: integer + client_cert: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + client_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + core_api_versions: + items: + type: string + type: array + data_type: + type: string + in_namespace_path: + items: + type: string + type: array + in_pod_path: + items: + type: string + type: array + kubernetes_url: + type: string + secret_dir: + type: string + ssl_partial_chain: + type: boolean + verify_ssl: + type: boolean + type: object + geoip: + properties: + backend_library: + type: string + geoip_2_database: + type: string + geoip_database: + 
type: string + geoip_lookup_keys: + type: string + records: + items: + additionalProperties: + type: string + type: object + type: array + skip_adding_null_record: + type: boolean + type: object + grep: + properties: + and: + items: + properties: + exclude: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + regexp: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + type: object + type: array + exclude: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + or: + items: + properties: + exclude: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + regexp: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + type: object + type: array + regexp: + items: + properties: + key: + type: string + pattern: + type: string + required: + - key + - pattern + type: object + type: array + type: object + parser: + properties: + emit_invalid_record_to_error: + type: boolean + hash_value_field: + type: string + inject_key_prefix: + type: string + key_name: + type: string + parse: + properties: + delimiter: + type: string + delimiter_pattern: + type: string + estimate_current_event: + type: boolean + expression: + type: string + format: + type: string + format_firstline: + type: string + keep_time_key: + type: boolean + label_delimiter: + type: string + local_time: + type: boolean + multiline: + items: + type: string + type: array + null_empty_string: + type: boolean + null_value_pattern: + type: string + patterns: + items: + properties: + estimate_current_event: + type: boolean + expression: + type: string + format: + type: string + keep_time_key: + type: boolean + local_time: + type: 
boolean + null_empty_string: + type: boolean + null_value_pattern: + type: string + time_format: + type: string + time_key: + type: string + time_type: + type: string + timezone: + type: string + type: + type: string + types: + type: string + utc: + type: boolean + type: object + type: array + time_format: + type: string + time_key: + type: string + time_type: + type: string + timezone: + type: string + type: + type: string + types: + type: string + utc: + type: boolean + type: object + parsers: + items: + properties: + delimiter: + type: string + delimiter_pattern: + type: string + estimate_current_event: + type: boolean + expression: + type: string + format: + type: string + format_firstline: + type: string + keep_time_key: + type: boolean + label_delimiter: + type: string + local_time: + type: boolean + multiline: + items: + type: string + type: array + null_empty_string: + type: boolean + null_value_pattern: + type: string + patterns: + items: + properties: + estimate_current_event: + type: boolean + expression: + type: string + format: + type: string + keep_time_key: + type: boolean + local_time: + type: boolean + null_empty_string: + type: boolean + null_value_pattern: + type: string + time_format: + type: string + time_key: + type: string + time_type: + type: string + timezone: + type: string + type: + type: string + types: + type: string + utc: + type: boolean + type: object + type: array + time_format: + type: string + time_key: + type: string + time_type: + type: string + timezone: + type: string + type: + type: string + types: + type: string + utc: + type: boolean + type: object + type: array + remove_key_name_field: + type: boolean + replace_invalid_sequence: + type: boolean + reserve_data: + type: boolean + reserve_time: + type: boolean + type: object + prometheus: + properties: + labels: + additionalProperties: + type: string + type: object + metrics: + items: + properties: + buckets: + type: string + desc: + type: string + key: + type: string + 
labels: + additionalProperties: + type: string + type: object + name: + type: string + type: + type: string + required: + - desc + - name + - type + type: object + type: array + type: object + record_modifier: + properties: + char_encoding: + type: string + prepare_value: + type: string + records: + items: + additionalProperties: + type: string + type: object + type: array + remove_keys: + type: string + replaces: + items: + properties: + expression: + type: string + key: + type: string + replace: + type: string + required: + - expression + - key + - replace + type: object + type: array + whitelist_keys: + type: string + type: object + record_transformer: + properties: + auto_typecast: + type: boolean + enable_ruby: + type: boolean + keep_keys: + type: string + records: + items: + additionalProperties: + type: string + type: object + type: array + remove_keys: + type: string + renew_record: + type: boolean + renew_time_key: + type: string + type: object + stdout: + properties: + output_type: + type: string + type: object + sumologic: + properties: + collector_key_name: + type: string + collector_value: + type: string + exclude_container_regex: + type: string + exclude_facility_regex: + type: string + exclude_host_regex: + type: string + exclude_namespace_regex: + type: string + exclude_pod_regex: + type: string + exclude_priority_regex: + type: string + exclude_unit_regex: + type: string + log_format: + type: string + source_category: + type: string + source_category_key_name: + type: string + source_category_prefix: + type: string + source_category_replace_dash: + type: string + source_host: + type: string + source_host_key_name: + type: string + source_name: + type: string + source_name_key_name: + type: string + tracing_annotation_prefix: + type: string + tracing_container_name: + type: string + tracing_format: + type: boolean + tracing_host: + type: string + tracing_label_prefix: + type: string + tracing_namespace: + type: string + tracing_pod: + type: string + 
tracing_pod_id: + type: string + type: object + tag_normaliser: + properties: + format: + type: string + type: object + throttle: + properties: + group_bucket_limit: + type: integer + group_bucket_period_s: + type: integer + group_drop_logs: + type: boolean + group_key: + type: string + group_reset_rate_s: + type: integer + group_warning_delay_s: + type: integer + type: object + type: object + type: array + loggingRef: + type: string + nodeAgents: + items: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + name: + type: string + nodeAgentFluentbit: + properties: + bufferStorage: + properties: + storage.backlog.mem_limit: + type: string + storage.checksum: + type: string + storage.path: + type: string + storage.sync: + type: string + type: object + bufferStorageVolume: + properties: + emptyDir: + properties: + medium: + type: string + sizeLimit: + type: string + type: object + host_path: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + pvc: + properties: + source: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + type: string + type: object + requests: + additionalProperties: + type: string + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: 
array + matchLabels: + additionalProperties: + type: string + type: object + type: object + storageClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + type: object + type: object + containersPath: + type: string + coroStackSize: + format: int32 + type: integer + customConfigSecret: + type: string + daemonSet: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + spec: + properties: + minReadySeconds: + format: int32 + type: integer + revisionHistoryLimit: + format: int32 + type: integer + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + template: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + spec: + properties: + activeDeadlineSeconds: + format: int64 + type: integer + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + preference: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: 
+ properties: + nodeSelectorTerms: + items: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + properties: + podAffinityTerm: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: 
+ key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + weight: + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + namespaces: + items: + type: string + type: array + topologyKey: + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + automountServiceAccountToken: + type: boolean + containers: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + 
configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - 
value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + properties: + limits: + additionalProperties: + type: string + type: object + requests: + additionalProperties: + type: string + type: object + type: object + 
securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: 
+ devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + dnsConfig: + properties: + nameservers: + items: + type: string + type: array + options: + items: + properties: + name: + type: string + value: + type: string + type: object + type: array + searches: + items: + type: string + type: array + type: object + dnsPolicy: + type: string + enableServiceLinks: + type: boolean + ephemeralContainers: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + 
lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: 
integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + properties: + limits: + additionalProperties: + type: string + type: object + requests: + additionalProperties: + type: string + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + 
runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + targetContainerName: + type: string + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + 
subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + hostAliases: + items: + properties: + hostnames: + items: + type: string + type: array + ip: + type: string + type: object + type: array + hostIPC: + type: boolean + hostNetwork: + type: boolean + hostPID: + type: boolean + hostname: + type: string + imagePullSecrets: + items: + properties: + name: + type: string + type: object + type: array + initContainers: + items: + properties: + args: + items: + type: string + type: array + command: + items: + type: string + type: array + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + items: + properties: + configMapRef: + properties: + name: + type: string + optional: + type: boolean + type: object + prefix: + type: string + secretRef: + properties: + name: + type: string + optional: + type: boolean + type: object + type: object + type: array + image: + type: string + imagePullPolicy: + type: string + lifecycle: + properties: + postStart: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + 
value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: 
+ format: int32 + type: integer + type: object + name: + type: string + ports: + items: + properties: + containerPort: + format: int32 + type: integer + hostIP: + type: string + hostPort: + format: int32 + type: integer + name: + type: string + protocol: + type: string + required: + - containerPort + type: object + type: array + readinessProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + resources: + properties: + limits: + additionalProperties: + type: string + type: object + requests: + additionalProperties: + type: string + type: object + type: object + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: 
object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + type: object + failureThreshold: + format: int32 + type: integer + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + format: int32 + type: integer + type: object + stdin: + type: boolean + stdinOnce: + type: boolean + terminationMessagePath: + type: string + terminationMessagePolicy: + type: string + tty: + type: boolean + volumeDevices: + items: + properties: + devicePath: + type: string + name: + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + items: + properties: + mountPath: + type: string + mountPropagation: + type: string + name: + type: string + readOnly: + type: boolean + subPath: + type: string + subPathExpr: + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + type: string + required: + - name + type: object + type: array + nodeName: + type: string + nodeSelector: + additionalProperties: + type: string + type: object + 
overhead: + additionalProperties: + type: string + type: object + preemptionPolicy: + type: string + priority: + format: int32 + type: integer + priorityClassName: + type: string + readinessGates: + items: + properties: + conditionType: + type: string + required: + - conditionType + type: object + type: array + restartPolicy: + type: string + runtimeClassName: + type: string + schedulerName: + type: string + securityContext: + properties: + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + serviceAccountName: + type: string + setHostnameAsFQDN: + type: boolean + shareProcessNamespace: + type: boolean + subdomain: + type: string + terminationGracePeriodSeconds: + format: int64 + type: integer + tolerations: + items: + properties: + effect: + type: string + key: + type: string + operator: + type: string + tolerationSeconds: + format: int64 + type: integer + value: + type: string + type: object + type: array + topologySpreadConstraints: + items: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object 
+ type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + maxSkew: + format: int32 + type: integer + topologyKey: + type: string + whenUnsatisfiable: + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + volumes: + items: + properties: + awsElasticBlockStore: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + azureDisk: + properties: + cachingMode: + type: string + diskName: + type: string + diskURI: + type: string + fsType: + type: string + kind: + type: string + readOnly: + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + properties: + readOnly: + type: boolean + secretName: + type: string + shareName: + type: string + required: + - secretName + - shareName + type: object + cephfs: + properties: + monitors: + items: + type: string + type: array + path: + type: string + readOnly: + type: boolean + secretFile: + type: string + secretRef: + properties: + name: + type: string + type: object + user: + type: string + required: + - monitors + type: object + cinder: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeID: + type: string + required: + - volumeID + type: object + configMap: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + csi: + properties: + driver: + type: string + fsType: + type: string + nodePublishSecretRef: + properties: + name: + type: string + type: object + readOnly: + type: boolean + volumeAttributes: + additionalProperties: + type: string + type: object + 
required: + - driver + type: object + downwardAPI: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + properties: + medium: + type: string + sizeLimit: + type: string + type: object + ephemeral: + properties: + readOnly: + type: boolean + volumeClaimTemplate: + properties: + metadata: + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + type: string + type: object + requests: + additionalProperties: + type: string + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + storageClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + required: + - spec + type: object + type: object + fc: + properties: + fsType: + type: string + lun: + format: int32 + type: integer + readOnly: + type: boolean + targetWWNs: + items: + type: string + type: array + wwids: + items: + type: string + type: array + type: object + flexVolume: + properties: + driver: + type: string + fsType: + type: string + options: + additionalProperties: + type: string + type: 
object + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + required: + - driver + type: object + flocker: + properties: + datasetName: + type: string + datasetUUID: + type: string + type: object + gcePersistentDisk: + properties: + fsType: + type: string + partition: + format: int32 + type: integer + pdName: + type: string + readOnly: + type: boolean + required: + - pdName + type: object + gitRepo: + properties: + directory: + type: string + repository: + type: string + revision: + type: string + required: + - repository + type: object + glusterfs: + properties: + endpoints: + type: string + path: + type: string + readOnly: + type: boolean + required: + - endpoints + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + iscsi: + properties: + chapAuthDiscovery: + type: boolean + chapAuthSession: + type: boolean + fsType: + type: string + initiatorName: + type: string + iqn: + type: string + iscsiInterface: + type: string + lun: + format: int32 + type: integer + portals: + items: + type: string + type: array + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + targetPortal: + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + type: string + nfs: + properties: + path: + type: string + readOnly: + type: boolean + server: + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + properties: + fsType: + type: string + pdID: + type: string + required: + - pdID + type: object + portworxVolume: + properties: + fsType: + type: string + readOnly: + type: boolean + volumeID: + type: string + required: + - volumeID + type: object + projected: + properties: + defaultMode: + format: int32 + type: integer + sources: + items: 
+ properties: + configMap: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + downwardAPI: + properties: + items: + items: + properties: + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + mode: + format: int32 + type: integer + path: + type: string + resourceFieldRef: + properties: + containerName: + type: string + divisor: + type: string + resource: + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + properties: + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + name: + type: string + optional: + type: boolean + type: object + serviceAccountToken: + properties: + audience: + type: string + expirationSeconds: + format: int64 + type: integer + path: + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + properties: + group: + type: string + readOnly: + type: boolean + registry: + type: string + tenant: + type: string + user: + type: string + volume: + type: string + required: + - registry + - volume + type: object + rbd: + properties: + fsType: + type: string + image: + type: string + keyring: + type: string + monitors: + items: + type: string + type: array + pool: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + user: + type: string + required: + - image + - monitors + type: object + scaleIO: + properties: + fsType: + type: string + gateway: + type: string + protectionDomain: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: 
+ type: string + type: object + sslEnabled: + type: boolean + storageMode: + type: string + storagePool: + type: string + system: + type: string + volumeName: + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + properties: + defaultMode: + format: int32 + type: integer + items: + items: + properties: + key: + type: string + mode: + format: int32 + type: integer + path: + type: string + required: + - key + - path + type: object + type: array + optional: + type: boolean + secretName: + type: string + type: object + storageos: + properties: + fsType: + type: string + readOnly: + type: boolean + secretRef: + properties: + name: + type: string + type: object + volumeName: + type: string + volumeNamespace: + type: string + type: object + vsphereVolume: + properties: + fsType: + type: string + storagePolicyID: + type: string + storagePolicyName: + type: string + volumePath: + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + type: object + updateStrategy: + properties: + rollingUpdate: + properties: + maxUnavailable: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: object + type: + type: string + type: object + type: object + type: object + disableKubernetesFilter: + type: boolean + enableUpstream: + type: boolean + enabled: + type: boolean + extraVolumeMounts: + items: + properties: + destination: + pattern: ^/.+$ + type: string + readOnly: + type: boolean + source: + pattern: ^/.+$ + type: string + required: + - destination + - source + type: object + type: array + filterAws: + properties: + Match: + type: string + account_id: + type: boolean + ami_id: + type: boolean + az: + type: boolean + ec2_instance_id: + type: boolean + ec2_instance_type: + type: boolean + hostname: + type: boolean + imds_version: + type: string + private_ip: + type: boolean + vpc_id: + type: boolean + type: object + filterKubernetes: + properties: + 
Annotations: + type: string + Buffer_Size: + type: string + Dummy_Meta: + type: string + K8S-Logging.Exclude: + type: string + K8S-Logging.Parser: + type: string + Keep_Log: + type: string + Kube_CA_File: + type: string + Kube_CA_Path: + type: string + Kube_Tag_Prefix: + type: string + Kube_Token_File: + type: string + Kube_URL: + type: string + Kube_meta_preload_cache_dir: + type: string + Labels: + type: string + Match: + type: string + Merge_Log: + type: string + Merge_Log_Key: + type: string + Merge_Log_Trim: + type: string + Merge_Parser: + type: string + Regex_Parser: + type: string + Use_Journal: + type: string + tls.debug: + type: string + tls.verify: + type: string + type: object + flush: + format: int32 + type: integer + forwardOptions: + properties: + Require_ack_response: + type: boolean + Retry_Limit: + type: string + Send_options: + type: boolean + Tag: + type: string + Time_as_Integer: + type: boolean + type: object + grace: + format: int32 + type: integer + inputTail: + properties: + Buffer_Chunk_Size: + type: string + Buffer_Max_Size: + type: string + DB: + type: string + DB_Sync: + type: string + Docker_Mode: + type: string + Docker_Mode_Flush: + type: string + Exclude_Path: + type: string + Ignore_Older: + type: string + Key: + type: string + Mem_Buf_Limit: + type: string + Multiline: + type: string + Multiline_Flush: + type: string + Parser: + type: string + Parser_Firstline: + type: string + Parser_N: + items: + type: string + type: array + Path: + type: string + Path_Key: + type: string + Refresh_Interval: + type: string + Rotate_Wait: + type: string + Skip_Long_Lines: + type: string + Tag: + type: string + Tag_Regex: + type: string + storage.type: + type: string + type: object + livenessDefaultCheck: + type: boolean + logLevel: + type: string + metrics: + properties: + interval: + type: string + path: + type: string + port: + format: int32 + type: integer + prometheusAnnotations: + type: boolean + serviceMonitor: + type: boolean + 
serviceMonitorConfig: + properties: + additionalLabels: + additionalProperties: + type: string + type: object + honorLabels: + type: boolean + metricRelabelings: + items: + properties: + action: + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + type: string + type: array + targetLabel: + type: string + type: object + type: array + relabelings: + items: + properties: + action: + type: string + modulus: + format: int64 + type: integer + regex: + type: string + replacement: + type: string + separator: + type: string + sourceLabels: + items: + type: string + type: array + targetLabel: + type: string + type: object + type: array + type: object + timeout: + type: string + type: object + metricsService: + properties: + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + spec: + properties: + clusterIP: + type: string + externalIPs: + items: + type: string + type: array + externalName: + type: string + externalTrafficPolicy: + type: string + healthCheckNodePort: + format: int32 + type: integer + ipFamily: + type: string + loadBalancerIP: + type: string + loadBalancerSourceRanges: + items: + type: string + type: array + ports: + items: + properties: + appProtocol: + type: string + name: + type: string + nodePort: + format: int32 + type: integer + port: + format: int32 + type: integer + protocol: + type: string + targetPort: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + type: array + publishNotReadyAddresses: + type: boolean + selector: + additionalProperties: + type: string + type: object + sessionAffinity: + type: string + sessionAffinityConfig: + properties: + clientIP: + properties: + timeoutSeconds: + format: int32 + type: integer + type: object + type: object + 
topologyKeys: + items: + type: string + type: array + type: + type: string + type: object + type: object + network: + properties: + connectTimeout: + format: int32 + type: integer + keepalive: + type: boolean + keepaliveIdleTimeout: + format: int32 + type: integer + keepaliveMaxRecycle: + format: int32 + type: integer + type: object + podPriorityClassName: + type: string + positiondb: + properties: + emptyDir: + properties: + medium: + type: string + sizeLimit: + type: string + type: object + host_path: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + hostPath: + properties: + path: + type: string + type: + type: string + required: + - path + type: object + pvc: + properties: + source: + properties: + claimName: + type: string + readOnly: + type: boolean + required: + - claimName + type: object + spec: + properties: + accessModes: + items: + type: string + type: array + dataSource: + properties: + apiGroup: + type: string + kind: + type: string + name: + type: string + required: + - kind + - name + type: object + resources: + properties: + limits: + additionalProperties: + type: string + type: object + requests: + additionalProperties: + type: string + type: object + type: object + selector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + type: object + type: object + storageClassName: + type: string + volumeMode: + type: string + volumeName: + type: string + type: object + type: object + type: object + security: + properties: + podSecurityContext: + properties: + fsGroup: + format: int64 + type: integer + fsGroupChangePolicy: + type: string + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + 
level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + supplementalGroups: + items: + format: int64 + type: integer + type: array + sysctls: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + podSecurityPolicyCreate: + type: boolean + roleBasedAccessControlCreate: + type: boolean + securityContext: + properties: + allowPrivilegeEscalation: + type: boolean + capabilities: + properties: + add: + items: + type: string + type: array + drop: + items: + type: string + type: array + type: object + privileged: + type: boolean + procMount: + type: string + readOnlyRootFilesystem: + type: boolean + runAsGroup: + format: int64 + type: integer + runAsNonRoot: + type: boolean + runAsUser: + format: int64 + type: integer + seLinuxOptions: + properties: + level: + type: string + role: + type: string + type: + type: string + user: + type: string + type: object + seccompProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object + windowsOptions: + properties: + gmsaCredentialSpec: + type: string + gmsaCredentialSpecName: + type: string + runAsUserName: + type: string + type: object + type: object + serviceAccount: + type: string + type: object + serviceAccount: + properties: + automountServiceAccountToken: + type: boolean + imagePullSecrets: + items: + properties: + name: + type: string + type: object + type: array + metadata: + properties: + annotations: + additionalProperties: + type: string + type: object + labels: + additionalProperties: + type: string + type: object + type: object + secrets: + 
items: + properties: + apiVersion: + type: string + fieldPath: + type: string + kind: + type: string + name: + type: string + namespace: + type: string + resourceVersion: + type: string + uid: + type: string + type: object + type: array + type: object + targetHost: + type: string + targetPort: + format: int32 + type: integer + tls: + properties: + enabled: + type: boolean + secretName: + type: string + sharedKey: + type: string + required: + - enabled + type: object + varLogsPath: + type: string + type: object + profile: + type: string + type: object + type: array + watchNamespaces: + items: + type: string + type: array + required: + - controlNamespace + type: object + status: + properties: + configCheckResults: + additionalProperties: + type: boolean + type: object + type: object + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_outputs.yaml b/charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_outputs.yaml new file mode 100755 index 000000000..85ee16497 --- /dev/null +++ b/charts/rancher-logging/rancher-logging-crd/3.9.400/templates/logging.banzaicloud.io_outputs.yaml @@ -0,0 +1,4715 @@ + +--- +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.4 + creationTimestamp: null + name: outputs.logging.banzaicloud.io +spec: + additionalPrinterColumns: + - JSONPath: .status.active + description: Is the output active? 
+ name: Active + type: boolean + - JSONPath: .status.problemsCount + description: Number of problems + name: Problems + type: integer + group: logging.banzaicloud.io + names: + categories: + - logging-all + kind: Output + listKind: OutputList + plural: outputs + singular: output + preserveUnknownFields: false + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + properties: + awsElasticsearch: + properties: + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + endpoint: + properties: + access_key_id: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + 
properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + assume_role_arn: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + assume_role_session_name: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + assume_role_web_identity_token_file: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + ecs_container_credentials_relative_uri: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + region: + type: string + secret_access_key: + properties: + 
mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + sts_credentials_region: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + url: + type: string + type: object + flush_interval: + type: string + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + include_tag_key: + type: boolean + logstash_format: + type: boolean + tag_key: + type: string + type: object + azurestorage: + properties: + auto_create_container: + type: boolean + azure_container: + type: string + azure_imds_api_version: + type: string + azure_object_key_format: + type: string + azure_storage_access_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + azure_storage_account: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + 
optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + azure_storage_sas_token: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + format: + type: string + path: + type: string + required: + - azure_container + - azure_storage_access_key + - 
azure_storage_account + - azure_storage_sas_token + type: object + cloudwatch: + properties: + auto_create_stream: + type: boolean + aws_instance_profile_credentials_retries: + type: integer + aws_key_id: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + aws_sec_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + aws_sts_role_arn: + type: string + aws_sts_session_name: + type: string + aws_use_sts: + type: boolean + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + 
retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + concurrency: + type: integer + endpoint: + type: string + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + http_proxy: + type: string + include_time_key: + type: boolean + json_handler: + type: string + localtime: + type: boolean + log_group_aws_tags: + type: string + log_group_aws_tags_key: + type: string + log_group_name: + type: string + log_group_name_key: + type: string + log_rejected_request: + type: string + log_stream_name: + type: string + log_stream_name_key: + type: string + max_events_per_batch: + type: integer + max_message_length: + type: integer + message_keys: + type: string + put_log_events_disable_retry_limit: + type: boolean + put_log_events_retry_limit: + type: integer + put_log_events_retry_wait: + type: string + region: + type: string + remove_log_group_aws_tags_key: + type: string + remove_log_group_name_key: + type: string + remove_log_stream_name_key: + type: string + remove_retention_in_days: + type: string + retention_in_days: + type: string + retention_in_days_key: + type: string + use_tag_as_group: + type: boolean + use_tag_as_stream: + type: boolean + required: + - region + type: object + datadog: + properties: + api_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + 
- key + type: object + type: object + type: object + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + compression_level: + type: string + dd_hostname: + type: string + dd_source: + type: string + dd_sourcecategory: + type: string + dd_tags: + type: string + host: + type: string + include_tag_key: + type: boolean + max_backoff: + type: string + max_retries: + type: string + no_ssl_validation: + type: boolean + port: + type: string + service: + type: string + ssl_port: + type: string + tag_key: + type: string + timestamp_key: + type: string + use_compression: + type: boolean + use_http: + type: boolean + use_json: + type: boolean + use_ssl: + type: boolean + required: + - api_key + type: object + elasticsearch: + properties: + application_name: + type: string + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: 
string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + bulk_message_request_threshold: + type: string + ca_file: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + client_cert: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + client_key: + properties: + mountFrom: + properties: + secretKeyRef: + 
properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + client_key_pass: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + content_type: + type: string + custom_headers: + type: string + customize_template: + type: string + default_elasticsearch_version: + type: string + deflector_alias: + type: string + enable_ilm: + type: boolean + exception_backup: + type: boolean + fail_on_putting_template_retry_exceed: + type: boolean + flatten_hashes: + type: boolean + flatten_hashes_separator: + type: string + host: + type: string + hosts: + type: string + http_backend: + type: string + id_key: + type: string + ignore_exceptions: + type: string + ilm_policy: + type: string + ilm_policy_id: + type: string + ilm_policy_overwrite: + type: boolean + include_index_in_url: + type: boolean + include_tag_key: + type: boolean + include_timestamp: + type: boolean + index_date_pattern: + type: string + index_name: + type: string + index_prefix: + type: string + log_es_400_reason: + type: boolean + logstash_dateformat: + type: string + logstash_format: + type: boolean + logstash_prefix: + type: string + logstash_prefix_separator: + type: string + max_retry_get_es_version: + type: string + max_retry_putting_template: + type: string + password: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string 
+ name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + path: + type: string + pipeline: + type: string + port: + type: integer + prefer_oj_serializer: + type: boolean + reconnect_on_error: + type: boolean + reload_after: + type: string + reload_connections: + type: boolean + reload_on_failure: + type: boolean + remove_keys_on_update: + type: string + remove_keys_on_update_key: + type: string + request_timeout: + type: string + resurrect_after: + type: string + retry_tag: + type: string + rollover_index: + type: boolean + routing_key: + type: string + scheme: + type: string + sniffer_class_name: + type: string + ssl_max_version: + type: string + ssl_min_version: + type: string + ssl_verify: + type: boolean + ssl_version: + type: string + suppress_doc_wrap: + type: boolean + suppress_type_name: + type: boolean + tag_key: + type: string + target_index_key: + type: string + target_type_key: + type: string + template_file: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + template_name: + type: string + template_overwrite: + type: boolean + templates: + type: string + time_key: + type: string + time_key_format: + type: string + time_parse_error_tag: + type: string + time_precision: + type: string + type_name: + type: string + unrecoverable_error_types: + type: string + user: + type: string + utc_index: + type: boolean + validate_client_version: + 
type: boolean + verify_es_version_at_startup: + type: boolean + with_transporter_log: + type: boolean + write_operation: + type: string + type: object + file: + properties: + add_path_suffix: + type: boolean + append: + type: boolean + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + path: + type: string + path_suffix: + type: string + symlink_path: + type: boolean + required: + - path + type: object + forward: + properties: + ack_response_timeout: + type: integer + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + 
disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + connect_timeout: + type: integer + dns_round_robin: + type: boolean + expire_dns_cache: + type: integer + hard_timeout: + type: integer + heartbeat_interval: + type: integer + heartbeat_type: + type: string + ignore_network_errors_at_startup: + type: boolean + keepalive: + type: boolean + keepalive_timeout: + type: integer + phi_failure_detector: + type: boolean + phi_threshold: + type: integer + recover_wait: + type: integer + require_ack_response: + type: boolean + security: + properties: + allow_anonymous_source: + type: boolean + self_hostname: + type: string + shared_key: + type: string + user_auth: + type: boolean + required: + - self_hostname + - shared_key + type: object + send_timeout: + type: integer + servers: + items: + properties: + host: + type: string + name: + type: string + password: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + 
properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + port: + type: integer + shared_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + standby: + type: boolean + username: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + weight: + type: integer + required: + - host + type: object + type: array + tls_allow_self_signed_cert: + type: boolean + tls_cert_logical_store_name: + type: string + tls_cert_path: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + tls_cert_thumbprint: + type: string + tls_cert_use_enterprise_store: + type: boolean + tls_ciphers: + type: string + tls_client_cert_path: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: 
object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + tls_client_private_key_passphrase: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + tls_client_private_key_path: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + tls_insecure_mode: + type: boolean + tls_verify_hostname: + type: boolean + tls_version: + type: string + verify_connection_at_startup: + type: boolean + required: + - servers + type: object + gcs: + properties: + acl: + type: string + auto_create_bucket: + type: boolean + bucket: + type: string + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + 
queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + client_retries: + type: integer + client_timeout: + type: integer + credentials_json: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + encryption_key: + type: string + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + hex_random_length: + type: integer + keyfile: + type: string + object_key_format: + type: string + object_metadata: + items: + properties: + key: + type: string + value: + type: string + required: + - key + - value + type: object + type: array + overwrite: + type: boolean + path: + type: string + project: + type: string + storage_class: + type: string + store_as: + type: string + transcoding: + type: boolean + required: + - bucket + - project + type: object + gelf: + properties: + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + 
delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + host: + type: string + port: + type: integer + protocol: + type: string + tls: + type: boolean + tls_options: + additionalProperties: + type: string + type: object + required: + - host + - port + type: object + http: + properties: + auth: + properties: + password: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + username: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + 
optional: + type: boolean + required: + - key + type: object + type: object + type: object + required: + - password + - username + type: object + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + content_type: + type: string + endpoint: + type: string + error_response_as_unrecoverable: + type: boolean + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + headers: + additionalProperties: + type: string + type: object + http_method: + type: string + json_array: + type: boolean + open_timeout: + type: integer + proxy: + type: string + read_timeout: + type: integer + retryable_response_codes: + items: + type: integer + type: array + ssl_timeout: + type: integer + tls_ca_cert_path: + properties: + mountFrom: + properties: + secretKeyRef: + 
properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + tls_ciphers: + type: string + tls_client_cert_path: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + tls_private_key_passphrase: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + tls_private_key_path: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + tls_verify_mode: + type: string + tls_version: + type: string + required: + - endpoint + type: object + kafka: + properties: + ack_timeout: + type: integer + brokers: + type: string + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + 
chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + compression_codec: + type: string + default_message_key: + type: string + default_partition_key: + type: string + default_topic: + type: string + exclude_partion_key: + type: boolean + exclude_topic_key: + type: boolean + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + get_kafka_client_log: + type: boolean + headers: + additionalProperties: + type: string + type: object + headers_from_record: + additionalProperties: + type: string + type: object + idempotent: + type: boolean + max_send_retries: + type: integer + message_key_key: + type: string + partition_key: + type: string + partition_key_key: + type: string + password: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - 
key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + required_acks: + type: integer + sasl_over_ssl: + type: boolean + scram_mechanism: + type: string + ssl_ca_cert: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + ssl_ca_certs_from_system: + type: boolean + ssl_client_cert: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + ssl_client_cert_chain: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + ssl_client_cert_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: 
+ type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + ssl_verify_hostname: + type: boolean + topic_key: + type: string + use_default_for_unknown_topic: + type: boolean + username: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + required: + - brokers + - format + type: object + kinesisStream: + properties: + assume_role_credentials: + properties: + duration_seconds: + type: string + external_id: + type: string + policy: + type: string + role_arn: + type: string + role_session_name: + type: string + required: + - role_arn + - role_session_name + type: object + aws_iam_retries: + type: integer + aws_key_id: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + aws_sec_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + aws_ses_token: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + 
type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + batch_request_max_count: + type: integer + batch_request_max_size: + type: integer + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + partition_key: + type: string + process_credentials: + properties: + process: + type: string + required: + - process + type: object + region: + type: string + reset_backoff_if_success: + type: boolean + 
retries_on_batch_request: + type: integer + stream_name: + type: string + required: + - stream_name + type: object + logdna: + properties: + api_key: + type: string + app: + type: string + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + hostname: + type: string + ingester_domain: + type: string + ingester_endpoint: + type: string + request_timeout: + type: string + tags: + type: string + required: + - api_key + - hostname + type: object + loggingRef: + type: string + logz: + properties: + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + 
flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + endpoint: + properties: + port: + type: integer + token: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + url: + type: string + type: object + gzip: + type: boolean + http_idle_timeout: + type: integer + output_include_tags: + type: boolean + output_include_time: + type: boolean + retry_count: + type: integer + retry_sleep: + type: integer + required: + - endpoint + type: object + loki: + properties: + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + 
overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + ca_cert: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + cert: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + configure_kubernetes_labels: + type: boolean + drop_single_key: + type: boolean + extra_labels: + additionalProperties: + type: string + type: object + extract_kubernetes_labels: + type: boolean + insecure_tls: + type: boolean + key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + 
properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + labels: + additionalProperties: + type: string + type: object + line_format: + type: string + password: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + remove_keys: + items: + type: string + type: array + tenant: + type: string + url: + type: string + username: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + type: object + newrelic: + properties: + api_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + base_uri: + type: string + license_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + 
properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + type: object + nullout: + type: object + oss: + properties: + aaccess_key_secret: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + access_key_id: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + auto_create_bucket: + type: boolean + bucket: + type: string + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + 
type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + check_bucket: + type: boolean + check_object: + type: boolean + download_crc_enable: + type: boolean + endpoint: + type: string + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + hex_random_length: + type: integer + index_format: + type: string + key_format: + type: string + open_timeout: + type: integer + oss_sdk_log_dir: + type: string + overwrite: + type: boolean + path: + type: string + read_timeout: + type: integer + store_as: + type: string + upload_crc_enable: + type: boolean + warn_for_delay: + type: string + required: + - aaccess_key_secret + - access_key_id + - bucket + - endpoint + type: object + redis: + properties: + allow_duplicate_key: + type: boolean + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + 
retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + db_number: + type: integer + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + host: + type: string + insert_key_prefix: + type: string + password: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + port: + type: integer + strftime_format: + type: string + ttl: + type: integer + type: object + s3: + properties: + acl: + type: string + assume_role_credentials: + properties: + duration_seconds: + type: string + external_id: + type: string + policy: + type: string + role_arn: + type: string + role_session_name: + type: string + required: + - role_arn + - role_session_name + type: object + auto_create_bucket: + type: string + aws_iam_retries: + type: string + aws_key_id: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + aws_sec_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + 
name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + check_apikey_on_start: + type: string + check_bucket: + type: string + check_object: + type: string + clustername: + type: string + compute_checksums: + type: string + enable_transfer_acceleration: + type: string + force_path_style: + type: string + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + grant_full_control: + type: string + grant_read: + type: string + 
grant_read_acp: + type: string + grant_write_acp: + type: string + hex_random_length: + type: string + index_format: + type: string + instance_profile_credentials: + properties: + http_open_timeout: + type: string + http_read_timeout: + type: string + ip_address: + type: string + port: + type: string + retries: + type: string + type: object + oneeye_format: + type: boolean + overwrite: + type: string + path: + type: string + proxy_uri: + type: string + s3_bucket: + type: string + s3_endpoint: + type: string + s3_metadata: + type: string + s3_object_key_format: + type: string + s3_region: + type: string + shared_credentials: + properties: + path: + type: string + profile_name: + type: string + type: object + signature_version: + type: string + sse_customer_algorithm: + type: string + sse_customer_key: + type: string + sse_customer_key_md5: + type: string + ssekms_key_id: + type: string + ssl_verify_peer: + type: string + storage_class: + type: string + store_as: + type: string + use_bundled_cert: + type: string + use_server_side_encryption: + type: string + warn_for_delay: + type: string + required: + - s3_bucket + type: object + splunkHec: + properties: + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + 
type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + ca_file: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + ca_path: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + client_cert: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + client_key: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + 
type: object + type: object + coerce_to_utf8: + type: boolean + data_type: + type: string + fields: + additionalProperties: + type: string + type: object + format: + properties: + add_newline: + type: boolean + message_key: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + hec_host: + type: string + hec_port: + type: integer + hec_token: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + host: + type: string + host_key: + type: string + idle_timeout: + type: integer + index: + type: string + index_key: + type: string + insecure_ssl: + type: boolean + keep_keys: + type: boolean + metric_name_key: + type: string + metric_value_key: + type: string + metrics_from_event: + type: boolean + non_utf8_replacement_string: + type: string + open_timeout: + type: integer + protocol: + type: string + read_timeout: + type: integer + source: + type: string + source_key: + type: string + sourcetype: + type: string + sourcetype_key: + type: string + ssl_ciphers: + type: string + required: + - hec_host + - hec_token + type: object + sumologic: + properties: + add_timestamp: + type: boolean + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + 
type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + compress: + type: boolean + compress_encoding: + type: string + custom_dimensions: + type: string + custom_fields: + items: + type: string + type: array + data_type: + type: string + delimiter: + type: string + disable_cookies: + type: boolean + endpoint: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + log_format: + type: string + log_key: + type: string + metric_data_format: + type: string + open_timeout: + type: integer + proxy_uri: + type: string + source_category: + type: string + source_host: + type: string + source_name: + type: string + source_name_key: + type: string + sumo_client: + type: string + timestamp_key: + type: string + verify_ssl: + type: boolean + required: + - endpoint + - source_name + type: object + syslog: + properties: + buffer: + properties: + chunk_full_threshold: + type: string + chunk_limit_records: + type: integer + chunk_limit_size: + type: string + compress: + type: string + 
delayed_commit_timeout: + type: string + disable_chunk_backup: + type: boolean + flush_at_shutdown: + type: boolean + flush_interval: + type: string + flush_mode: + type: string + flush_thread_burst_interval: + type: string + flush_thread_count: + type: integer + flush_thread_interval: + type: string + overflow_action: + type: string + path: + type: string + queue_limit_length: + type: integer + queued_chunks_limit_size: + type: integer + retry_exponential_backoff_base: + type: string + retry_forever: + type: boolean + retry_max_interval: + type: string + retry_max_times: + type: integer + retry_randomize: + type: boolean + retry_secondary_threshold: + type: string + retry_timeout: + type: string + retry_type: + type: string + retry_wait: + type: string + tags: + type: string + timekey: + type: string + timekey_use_utc: + type: boolean + timekey_wait: + type: string + timekey_zone: + type: string + total_limit_size: + type: string + type: + type: string + type: object + format: + properties: + app_name_field: + type: string + hostname_field: + type: string + log_field: + type: string + message_id_field: + type: string + proc_id_field: + type: string + rfc6587_message_size: + type: boolean + structured_data_field: + type: string + type: + enum: + - out_file + - json + - ltsv + - csv + - msgpack + - hash + - single_value + type: string + type: object + host: + type: string + insecure: + type: boolean + port: + type: integer + transport: + type: string + trusted_ca_path: + properties: + mountFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + value: + type: string + valueFrom: + properties: + secretKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + type: object + required: + - host + type: object + type: object + status: + properties: + active: + type: 
boolean + problems: + items: + type: string + type: array + problemsCount: + type: integer + type: object + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/rancher-logging/rancher-logging/3.9.400/.helmignore b/charts/rancher-logging/rancher-logging/3.9.400/.helmignore new file mode 100755 index 000000000..50af03172 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-logging/rancher-logging/3.9.400/Chart.yaml b/charts/rancher-logging/rancher-logging/3.9.400/Chart.yaml new file mode 100755 index 000000000..62a9a2fee --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/Chart.yaml @@ -0,0 +1,19 @@ +annotations: + catalog.cattle.io/auto-install: rancher-logging-crd=match + catalog.cattle.io/certified: rancher + catalog.cattle.io/display-name: Logging + catalog.cattle.io/namespace: cattle-logging-system + catalog.cattle.io/provides-gvr: logging.banzaicloud.io.clusterflow/v1beta1 + catalog.cattle.io/release-name: rancher-logging + catalog.cattle.io/ui-component: logging +apiVersion: v1 +appVersion: 3.9.4 +description: Collects and filter logs using highly configurable CRDs. Powered by Banzai + Cloud Logging Operator. 
+icon: https://charts.rancher.io/assets/logos/logging.svg +keywords: +- logging +- monitoring +- security +name: rancher-logging +version: 3.9.400 diff --git a/charts/rancher-logging/rancher-logging/3.9.400/README.md b/charts/rancher-logging/rancher-logging/3.9.400/README.md new file mode 100755 index 000000000..e2080b743 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/README.md @@ -0,0 +1,131 @@ + +# Logging operator Chart + +[Logging operator](https://github.com/banzaicloud/logging-operator) Managed centralized logging component fluentd and fluent-bit instance on cluster. + +## tl;dr: + +```bash +$ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com +$ helm repo update +$ helm install banzaicloud-stable/logging-operator +``` + +## Introduction + +This chart bootstraps a [Logging Operator](https://github.com/banzaicloud/logging-operator) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Kubernetes 1.8+ with Beta APIs enabled + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install --name my-release banzaicloud-stable/logging-operator +``` + +### CRDs +Use `createCustomResource=false` with Helm v3 to avoid trying to create CRDs from the `crds` folder and from templates at the same time. + +The command deploys **Logging operator** on the Kubernetes cluster with the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all Kubernetes components associated with the chart and deletes the release. + +## Configuration + +The following tables lists the configurable parameters of the logging-operator chart and their default values. 
+ +| Parameter | Description | Default | +| --------------------------------------------------- | ------------------------------------------------------ | ------------------------------ | +| `image.repository` | Container image repository | `ghcr.io/banzaicloud/logging-operator` | +| `image.tag` | Container image tag | `3.9.4` | +| `image.pullPolicy` | Container pull policy | `IfNotPresent` | +| `nameOverride` | Override name of app | `` | +| `fullnameOverride` | Override full name of app | `` | +| `namespaceOverride` | Override namespace of app | `` | +| `watchNamespace` | Namespace to watch for LoggingOperator CRD | `` | +| `rbac.enabled` | Create rbac service account and roles | `true` | +| `rbac.psp.enabled` | Must be used with `rbac.enabled` true. If true, creates & uses RBAC resources required in the cluster with [Pod Security Policies](https://kubernetes.io/docs/concepts/policy/pod-security-policy/) enabled. | `false` | +| `priorityClassName` | Operator priorityClassName | `{}` | +| `affinity` | Node Affinity | `{}` | +| `resources` | CPU/Memory resource requests/limits | `{}` | +| `tolerations` | Node Tolerations | `[]` | +| `nodeSelector` | Define which Nodes the Pods are scheduled on. | `{}` | +| `podLabels` | Define custom labels for logging-operator pods | `{}` | +| `annotations` | Define annotations for logging-operator pods | `{}` | +| `podSecurityContext` | Pod SecurityContext for Logging operator. [More info](https://kubernetes.io/docs/concepts/policy/security-context/) | `{"runAsNonRoot": true, "runAsUser": 1000, "fsGroup": 2000}` | +| `securityContext` | Container SecurityContext for Logging operator. [More info](https://kubernetes.io/docs/concepts/policy/security-context/) | `{"allowPrivilegeEscalation": false, "readOnlyRootFilesystem": true}` | +| `createCustomResource` | Create CRDs. | `true` | +| `monitoring.serviceMonitor.enabled` | Create Prometheus Operator servicemonitor. 
| `false` | +| `global.seLinux.enabled` | Add seLinuxOptions to Logging resources, requires the [rke2-selinux RPM](https://github.com/rancher/rke2-selinux/releases) | `false` | + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example: + +```bash +$ helm install --name my-release -f values.yaml banzaicloud-stable/logging-operator +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Installing Fluentd and Fluent-bit via logging + +The previous chart does **not** install `logging` resource to deploy Fluentd and Fluent-bit on cluster. To install them please use the [Logging Operator Logging](https://github.com/banzaicloud/logging-operator/tree/master/charts/logging-operator-logging) chart. + +## tl;dr: + +```bash +$ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com +$ helm repo update +$ helm install banzaicloud-stable/logging-operator-logging +``` + +## Configuration + +The following tables lists the configurable parameters of the logging-operator-logging chart and their default values. +## tl;dr: + +```bash +$ helm repo add banzaicloud-stable https://kubernetes-charts.banzaicloud.com +$ helm repo update +$ helm install banzaicloud-stable/logging-operator-logging +``` + +## Configuration + +The following tables lists the configurable parameters of the logging-operator-logging chart and their default values. + +| Parameter | Description | Default | +| --------------------------------------------------- | ------------------------------------------------------ | ------------------------------ | +| `tls.enabled` | Enabled TLS communication between components | true | +| `tls.fluentdSecretName` | Specified secret name, which contain tls certs | This will overwrite automatic Helm certificate generation. | +| `tls.fluentbitSecretName` | Specified secret name, which contain tls certs | This will overwrite automatic Helm certificate generation. 
| +| `tls.sharedKey` | Shared key between nodes (fluentd-fluentbit) | [autogenerated] | +| `fluentbit.enabled` | Install fluent-bit | true | +| `fluentbit.namespace` | Specified fluentbit installation namespace | same as operator namespace | +| `fluentbit.image.tag` | Fluentbit container image tag | `1.6.10` | +| `fluentbit.image.repository` | Fluentbit container image repository | `fluent/fluent-bit` | +| `fluentbit.image.pullPolicy` | Fluentbit container pull policy | `IfNotPresent` | +| `fluentd.enabled` | Install fluentd | true | +| `fluentd.image.tag` | Fluentd container image tag | `v1.11.5-alpine-12` | +| `fluentd.image.repository` | Fluentd container image repository | `ghcr.io/banzaicloud/fluentd` | +| `fluentd.image.pullPolicy` | Fluentd container pull policy | `IfNotPresent` | +| `fluentd.volumeModImage.tag` | Fluentd volumeModImage container image tag | `latest` | +| `fluentd.volumeModImage.repository` | Fluentd volumeModImage container image repository | `busybox` | +| `fluentd.volumeModImage.pullPolicy` | Fluentd volumeModImage container pull policy | `IfNotPresent` | +| `fluentd.configReloaderImage.tag` | Fluentd configReloaderImage container image tag | `v0.2.2` | +| `fluentd.configReloaderImage.repository` | Fluentd configReloaderImage container image repository | `jimmidyson/configmap-reload` | +| `fluentd.configReloaderImage.pullPolicy` | Fluentd configReloaderImage container pull policy | `IfNotPresent` | +| `fluentd.fluentdPvcSpec.accessModes` | Fluentd persistence volume access modes | `[ReadWriteOnce]` | +| `fluentd.fluentdPvcSpec.resources.requests.storage` | Fluentd persistence volume size | `21Gi` | +| `fluentd.fluentdPvcSpec.storageClassName` | Fluentd persistence volume storageclass | `"""` | diff --git a/charts/rancher-logging/rancher-logging/3.9.400/app-readme.md b/charts/rancher-logging/rancher-logging/3.9.400/app-readme.md new file mode 100755 index 000000000..2de4ab4c5 --- /dev/null +++ 
b/charts/rancher-logging/rancher-logging/3.9.400/app-readme.md @@ -0,0 +1,22 @@ +# Rancher Logging + +This chart is based off of the upstream [Banzai Logging Operator](https://banzaicloud.com/docs/one-eye/logging-operator/) chart. The chart deploys a logging operator and CRDs, which allows users to configure complex logging pipelines with a few simple custom resources. There are two levels of logging, which allow you to collect all logs in a cluster or from a single namespace. + +For more information on how to use the feature, refer to our [docs](https://rancher.com/docs/rancher/v2.x/en/logging/v2.5/). + +## Namespace-level logging + +To collect logs from a single namespace, users create flows and these flows are connected to outputs or cluster outputs. + +## Cluster-level logging + +To collect logs from an entire cluster, users create cluster flows and cluster outputs. + +## CRDs + +- [Cluster Flow](https://banzaicloud.com/docs/one-eye/logging-operator/crds/v1beta1/clusterflow_types/) - A cluster flow is a CRD (`ClusterFlow`) that defines what logs to collect from the entire cluster. The cluster flow must be deployed in the same namespace as the logging operator. +- [Cluster Output](https://banzaicloud.com/docs/one-eye/logging-operator/crds/v1beta1/clusteroutput_types/) - A cluster output is a CRD (`ClusterOutput`) that defines how to connect to logging providers so they can start collecting logs. The cluster output must be deployed in the same namespace as the logging operator. The convenience of using a cluster output is that either a cluster flow or flow can send logs to those providers without needing to define specific outputs in each namespace for each flow. +- [Flow](https://banzaicloud.com/docs/one-eye/logging-operator/crds/v1beta1/flow_types/) - A flow is a CRD (`Flow`) that defines what logs to collect from the namespace that it is deployed in. 
+- [Output](https://banzaicloud.com/docs/one-eye/logging-operator/crds/v1beta1/output_types/) - An output is a CRD (`Output`) that defines how to connect to logging providers so logs can be sent to the provider. + +For more information on how to configure the Helm chart, refer to the Helm README. diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/NOTES.txt b/charts/rancher-logging/rancher-logging/3.9.400/templates/NOTES.txt new file mode 100755 index 000000000..e69de29bb diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/_helpers.tpl b/charts/rancher-logging/rancher-logging/3.9.400/templates/_helpers.tpl new file mode 100755 index 000000000..ef3ee9aa8 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/_helpers.tpl @@ -0,0 +1,76 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "logging-operator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "logging-operator.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Provides the namespace the chart will be installed in using the builtin .Release.Namespace, +or, if provided, a manually overwritten namespace value. 
+*/}} +{{- define "logging-operator.namespace" -}} +{{- if .Values.namespaceOverride -}} +{{ .Values.namespaceOverride -}} +{{- else -}} +{{ .Release.Namespace }} +{{- end -}} +{{- end -}} + + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "logging-operator.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "logging-operator.labels" -}} +app.kubernetes.io/name: {{ include "logging-operator.name" . }} +helm.sh/chart: {{ include "logging-operator.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- else -}} +{{- "" -}} +{{- end -}} +{{- end -}} + +{{- define "windowsEnabled" }} + {{- if not (kindIs "invalid" .Values.global.cattle.windows) }} + {{- if not (kindIs "invalid" .Values.global.cattle.windows.enabled) }} + {{- if .Values.global.cattle.windows.enabled }} +true + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/clusterrole.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/clusterrole.yaml new file mode 100755 index 000000000..709eedb91 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/clusterrole.yaml @@ -0,0 +1,167 @@ +{{- if .Values.rbac.enabled }} + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: {{ template "logging-operator.fullname" . 
}} +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + resources: + - endpoints + - namespaces + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - pods + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - get + - list + - watch +- apiGroups: + - apps + resources: + - daemonsets + - replicasets + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + - extensions + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - extensions + - networking.k8s.io + resources: + - ingresses + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - extensions + - policy + resources: + - podsecuritypolicies + verbs: + - create + - delete + - get + - list + - patch + - update + - use + - watch +- apiGroups: + - logging.banzaicloud.io + resources: + - clusterflows + - clusteroutputs + - flows + - loggings + - outputs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - logging.banzaicloud.io + resources: + - clusterflows/status + - clusteroutputs/status + - flows/status + - loggings/status + - outputs/status + verbs: + - get + - patch + - update +- apiGroups: + - monitoring.coreos.com + resources: + - servicemonitors + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - rolebindings + - roles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +{{- end }} diff --git 
a/charts/rancher-logging/rancher-logging/3.9.400/templates/clusterrolebinding.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/clusterrolebinding.yaml new file mode 100755 index 000000000..89d17d094 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/clusterrolebinding.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.enabled }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "logging-operator.fullname" . }} + labels: +{{ include "logging-operator.labels" . | indent 4 }} +subjects: + - kind: ServiceAccount + name: {{ template "logging-operator.fullname" . }} + namespace: {{ include "logging-operator.namespace" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "logging-operator.fullname" . }} + + {{- end }} \ No newline at end of file diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/crds.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/crds.yaml new file mode 100755 index 000000000..f573652d0 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/crds.yaml @@ -0,0 +1,6 @@ +{{- if .Values.createCustomResource -}} +{{- range $path, $bytes := .Files.Glob "crds/*.yaml" }} +{{ $.Files.Get $path }} +--- +{{- end }} +{{- end }} diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/deployment.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/deployment.yaml new file mode 100755 index 000000000..26d14cca2 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/deployment.yaml @@ -0,0 +1,68 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "logging-operator.fullname" . }} + namespace: {{ include "logging-operator.namespace" . }} + labels: +{{ include "logging-operator.labels" . 
| indent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: + app.kubernetes.io/name: {{ include "logging-operator.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ include "logging-operator.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + {{- with .Values.podLabels }} + {{ toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - name: {{ .Chart.Name }} + image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}" + args: {{ range .Values.extraArgs }} + - {{ . -}} + {{ end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + resources: + {{- toYaml .Values.resources | nindent 12 }} + ports: + - name: http + containerPort: {{ .Values.http.port }} + + {{- if .Values.securityContext }} + securityContext: {{ toYaml .Values.securityContext | nindent 12 }} + {{- end }} + {{- if .Values.podSecurityContext }} + securityContext: {{ toYaml .Values.podSecurityContext | nindent 8 }} + {{- end }} + + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.rbac.enabled }} + serviceAccountName: {{ include "logging-operator.fullname" . 
}} + {{- end }} diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/aks/logging.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/aks/logging.yaml new file mode 100755 index 000000000..916f93b41 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/aks/logging.yaml @@ -0,0 +1,58 @@ +{{- if .Values.additionalLoggingSources.aks.enabled }} +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Logging +metadata: + name: {{ .Release.Name }}-aks + namespace: {{ .Release.Namespace }} + labels: +{{ include "logging-operator.labels" . | indent 4 }} +spec: + controlNamespace: {{ .Release.Namespace }} + fluentbit: + image: + repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }} + tag: {{ .Values.images.fluentbit.tag }} + inputTail: + Tag: "aks" + Path: "/var/log/azure/kubelet-status.log" + {{- if .Values.global.psp.enabled }} + security: + podSecurityPolicyCreate: true + roleBasedAccessControlCreate: true + {{- end }} + {{- $total_tolerations := concat (.Values.tolerations) (.Values.fluentbit.tolerations) }} + {{- with $total_tolerations }} + tolerations: {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.fluentbit.resources }} + resources: {{- toYaml . | nindent 6 }} + {{- end }} + fluentd: + image: + repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }} + tag: {{ .Values.images.fluentd.tag }} + configReloaderImage: + repository: {{ template "system_default_registry" . }}{{ .Values.images.config_reloader.repository }} + tag: {{ .Values.images.config_reloader.tag }} + disablePvc: {{ .Values.disablePvc }} + {{- if .Values.global.psp.enabled }} + security: + podSecurityPolicyCreate: true + roleBasedAccessControlCreate: true + {{- end }} + {{- with .Values.tolerations }} + tolerations: {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.fluentd.resources }} + resources: {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.fluentd.livenessProbe }} + livenessProbe: {{- toYaml . | nindent 6 }} + {{- end }} +{{- end }} diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/eks/logging.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/eks/logging.yaml new file mode 100755 index 000000000..da4af2d9e --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/eks/logging.yaml @@ -0,0 +1,59 @@ +{{- if .Values.additionalLoggingSources.eks.enabled }} +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Logging +metadata: + name: {{ .Release.Name }}-eks + namespace: {{ .Release.Namespace }} + labels: +{{ include "logging-operator.labels" . | indent 4 }} +spec: + controlNamespace: {{ .Release.Namespace }} + fluentbit: + image: + repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }} + tag: {{ .Values.images.fluentbit.tag }} + inputTail: + Tag: "eks" + Path: "/var/log/messages" + Parser: "syslog" + {{- if .Values.global.psp.enabled }} + security: + podSecurityPolicyCreate: true + roleBasedAccessControlCreate: true + {{- end }} + {{- $total_tolerations := concat (.Values.tolerations) (.Values.fluentbit.tolerations) }} + {{- with $total_tolerations }} + tolerations: {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.fluentbit.resources }} + resources: {{- toYaml . | nindent 6 }} + {{- end }} + fluentd: + image: + repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }} + tag: {{ .Values.images.fluentd.tag }} + configReloaderImage: + repository: {{ template "system_default_registry" . 
}}{{ .Values.images.config_reloader.repository }} + tag: {{ .Values.images.config_reloader.tag }} + disablePvc: {{ .Values.disablePvc }} + {{- if .Values.global.psp.enabled }} + security: + podSecurityPolicyCreate: true + roleBasedAccessControlCreate: true + {{- end }} + {{- with .Values.tolerations }} + tolerations: {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.fluentd.resources }} + resources: {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.fluentd.livenessProbe }} + livenessProbe: {{- toYaml . | nindent 6 }} + {{- end }} +{{- end }} diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/gke/logging.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/gke/logging.yaml new file mode 100755 index 000000000..3823127b2 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/gke/logging.yaml @@ -0,0 +1,58 @@ +{{- if .Values.additionalLoggingSources.gke.enabled }} +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Logging +metadata: + name: {{ .Release.Name }}-gke + namespace: {{ .Release.Namespace }} + labels: +{{ include "logging-operator.labels" . | indent 4 }} +spec: + controlNamespace: {{ .Release.Namespace }} + fluentbit: + image: + repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }} + tag: {{ .Values.images.fluentbit.tag }} + inputTail: + Tag: "gke" + Path: "/var/log/kube-proxy.log" + {{- if .Values.global.psp.enabled }} + security: + podSecurityPolicyCreate: true + roleBasedAccessControlCreate: true + {{- end }} + {{- $total_tolerations := concat (.Values.tolerations) (.Values.fluentbit.tolerations) }} + {{- with $total_tolerations }} + tolerations: {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- with .Values.fluentbit.resources }} + resources: {{- toYaml . | nindent 6 }} + {{- end }} + fluentd: + image: + repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }} + tag: {{ .Values.images.fluentd.tag }} + configReloaderImage: + repository: {{ template "system_default_registry" . }}{{ .Values.images.config_reloader.repository }} + tag: {{ .Values.images.config_reloader.tag }} + disablePvc: {{ .Values.disablePvc }} + {{- if .Values.global.psp.enabled }} + security: + podSecurityPolicyCreate: true + roleBasedAccessControlCreate: true + {{- end }} + {{- with .Values.tolerations }} + tolerations: {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.fluentd.resources }} + resources: {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.fluentd.livenessProbe }} + livenessProbe: {{- toYaml . | nindent 6 }} + {{- end }} +{{- end }} diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/k3s/logging-k3s-openrc.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/k3s/logging-k3s-openrc.yaml new file mode 100755 index 000000000..cf5e988f3 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/k3s/logging-k3s-openrc.yaml @@ -0,0 +1,68 @@ +{{- if and .Values.additionalLoggingSources.k3s.enabled (eq .Values.additionalLoggingSources.k3s.container_engine "openrc")}} +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Logging +metadata: + name: {{ .Release.Name }}-k3s + namespace: {{ .Release.Namespace }} + labels: +{{ include "logging-operator.labels" . | indent 4 }} +spec: + controlNamespace: {{ .Release.Namespace }} + fluentbit: + image: + repository: {{ template "system_default_registry" . 
}}{{ .Values.images.fluentbit.repository }} + tag: {{ .Values.images.fluentbit.tag }} + inputTail: + Tag: "k3s" + Path: "/var/log/k3s.log" + extraVolumeMounts: + - source: "/var/log/" + destination: "/var/log" + readOnly: true + {{- if .Values.global.psp.enabled }} + security: + podSecurityPolicyCreate: true + roleBasedAccessControlCreate: true + {{- end }} + {{- $total_tolerations := concat (.Values.tolerations) (.Values.fluentbit.tolerations) }} + {{- with $total_tolerations }} + tolerations: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.fluentbit.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + fluentd: + image: + repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }} + tag: {{ .Values.images.fluentd.tag }} + configReloaderImage: + repository: {{ template "system_default_registry" . }}{{ .Values.images.config_reloader.repository }} + tag: {{ .Values.images.config_reloader.tag }} + disablePvc: {{ .Values.disablePvc }} + {{- if .Values.global.psp.enabled }} + security: + podSecurityPolicyCreate: true + roleBasedAccessControlCreate: true + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.fluentd.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.fluentd.livenessProbe }} + livenessProbe: {{- toYaml . 
| nindent 6 }} + {{- end }} +{{- end }} diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/k3s/logging-k3s-systemd.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/k3s/logging-k3s-systemd.yaml new file mode 100755 index 000000000..c4b3db0e7 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/k3s/logging-k3s-systemd.yaml @@ -0,0 +1,68 @@ +{{- if and .Values.additionalLoggingSources.k3s.enabled (eq .Values.additionalLoggingSources.k3s.container_engine "systemd")}} +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Logging +metadata: + name: {{ .Release.Name }}-k3s + namespace: {{ .Release.Namespace }} + labels: +{{ include "logging-operator.labels" . | indent 4 }} +spec: + controlNamespace: {{ .Release.Namespace }} + fluentbit: + image: + repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }} + tag: {{ .Values.images.fluentbit.tag }} + inputTail: + Tag: "k3s" + Path: "/var/log/syslog" + extraVolumeMounts: + - source: "/var/log/" + destination: "/var/log" + readOnly: true + {{- if .Values.global.psp.enabled }} + security: + podSecurityPolicyCreate: true + roleBasedAccessControlCreate: true + {{- end }} + {{- $total_tolerations := concat (.Values.tolerations) (.Values.fluentbit.tolerations) }} + {{- with $total_tolerations }} + tolerations: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.fluentbit.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + fluentd: + image: + repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }} + tag: {{ .Values.images.fluentd.tag }} + configReloaderImage: + repository: {{ template "system_default_registry" . 
}}{{ .Values.images.config_reloader.repository }} + tag: {{ .Values.images.config_reloader.tag }} + disablePvc: {{ .Values.disablePvc }} + {{- if .Values.global.psp.enabled }} + security: + podSecurityPolicyCreate: true + roleBasedAccessControlCreate: true + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.fluentd.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.fluentd.livenessProbe }} + livenessProbe: {{- toYaml . | nindent 6 }} + {{- end }} +{{- end }} diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke/configmap.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke/configmap.yaml new file mode 100755 index 000000000..ab91d93e2 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke/configmap.yaml @@ -0,0 +1,29 @@ +{{- if .Values.additionalLoggingSources.rke.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-rke + labels: +{{ include "logging-operator.labels" . 
| indent 4 }} +data: + fluent-bit.conf: | + [SERVICE] + Log_Level {{ .Values.additionalLoggingSources.rke.fluentbit.log_level }} + Parsers_File parsers.conf + + [INPUT] + Tag rke + Name tail + Path_Key filename + Parser docker + DB /tail-db/tail-containers-state.db + Mem_Buf_Limit {{ .Values.additionalLoggingSources.rke.fluentbit.mem_buffer_limit }} + Path /var/lib/rancher/rke/log/*.log + + [OUTPUT] + Name forward + Match * + Host {{ .Release.Name }}-fluentd.{{ .Release.Namespace }}.svc + Port 24240 + Retry_Limit False +{{- end }} diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke/daemonset.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke/daemonset.yaml new file mode 100755 index 000000000..840b3e722 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke/daemonset.yaml @@ -0,0 +1,124 @@ +{{- if .Values.additionalLoggingSources.rke.enabled }} +{{- $containers := printf "%s/containers/" (default "/var/lib/docker" .Values.global.dockerRootDirectory) }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: "{{ .Release.Name }}-rke-aggregator" + namespace: "{{ .Release.Namespace }}" +spec: + selector: + matchLabels: + name: {{ .Release.Name }}-rke-aggregator + template: + metadata: + name: "{{ .Release.Name }}-rke-aggregator" + namespace: "{{ .Release.Namespace }}" + labels: + name: {{ .Release.Name }}-rke-aggregator + spec: + containers: + - name: fluentbit + image: "{{ template "system_default_registry" . 
}}{{ .Values.images.fluentbit.repository }}:{{ .Values.images.fluentbit.tag }}" + volumeMounts: + - mountPath: /var/lib/rancher/rke/log/ + name: indir + - mountPath: {{ $containers }} + name: containers + - mountPath: /tail-db + name: positiondb + - mountPath: /fluent-bit/etc/fluent-bit.conf + name: config + subPath: fluent-bit.conf + {{- if .Values.global.seLinux.enabled }} + securityContext: + seLinuxOptions: + type: rke_logreader_t + {{- end }} + volumes: + - name: indir + hostPath: + path: /var/lib/rancher/rke/log/ + type: DirectoryOrCreate + - name: containers + hostPath: + path: {{ $containers }} + type: DirectoryOrCreate + - name: positiondb + emptyDir: {} + - name: config + configMap: + name: "{{ .Release.Name }}-rke" + serviceAccountName: "{{ .Release.Name }}-rke-aggregator" + {{- $total_tolerations := concat (.Values.tolerations) (.Values.fluentbit.tolerations) }} + {{- with $total_tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 8 }} + {{- end }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "{{ .Release.Name }}-rke-aggregator" + namespace: "{{ .Release.Namespace }}" +{{- if .Values.global.psp.enabled }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: "{{ .Release.Name }}-rke-aggregator" +rules: + - apiGroups: + - policy + resourceNames: + - "{{ .Release.Name }}-rke-aggregator" + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: "{{ .Release.Name }}-rke-aggregator" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: "{{ .Release.Name }}-rke-aggregator" +subjects: + - kind: ServiceAccount + name: "{{ .Release.Name }}-rke-aggregator" +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: "{{ .Release.Name }}-rke-aggregator" + namespace: "{{ .Release.Namespace }}" +spec: + allowPrivilegeEscalation: false + allowedHostPaths: + - pathPrefix: {{ $containers }} + readOnly: false + - pathPrefix: /var/lib/rancher/rke/log/ + readOnly: false + - pathPrefix: /var/lib/rancher/logging/ + readOnly: false + fsGroup: + rule: RunAsAny + readOnlyRootFilesystem: true + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - configMap + - emptyDir + - secret + - hostPath +{{- end }} +{{- end }} diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke2/configmap.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke2/configmap.yaml new file mode 100755 index 000000000..f1ba032d5 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke2/configmap.yaml @@ -0,0 +1,22 @@ +{{- if .Values.additionalLoggingSources.rke2.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-rke2 + labels: +{{ include "logging-operator.labels" . 
| indent 4 }} +data: + fluent-bit.conf: | + [INPUT] + Name systemd + Tag rke2 + Path {{ .Values.systemdLogPath | default "/var/log/journal" }} + Systemd_Filter _SYSTEMD_UNIT=rke2-server.service + + [OUTPUT] + Name forward + Match * + Host {{ .Release.Name }}-fluentd.{{ .Release.Namespace }}.svc + Port 24240 + Retry_Limit False +{{- end }} diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke2/daemonset.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke2/daemonset.yaml new file mode 100755 index 000000000..f45b74b76 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke2/daemonset.yaml @@ -0,0 +1,110 @@ +{{- if .Values.additionalLoggingSources.rke2.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: "{{ .Release.Name }}-rke2-journald-aggregator" + namespace: "{{ .Release.Namespace }}" +spec: + selector: + matchLabels: + name: {{ .Release.Name }}-rke2-journald-aggregator + template: + metadata: + name: "{{ .Release.Name }}-rke2-journald-aggregator" + namespace: "{{ .Release.Namespace }}" + labels: + name: {{ .Release.Name }}-rke2-journald-aggregator + spec: + containers: + - name: fluentd + image: "{{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }}:{{ .Values.images.fluentbit.tag }}" + {{- if .Values.global.seLinux.enabled }} + securityContext: + seLinuxOptions: + type: rke_logreader_t + {{- end }} + volumeMounts: + - mountPath: /fluent-bit/etc/ + name: config + - mountPath: {{ .Values.systemdLogPath | default "/var/log/journal" }} + name: journal + readOnly: true + - mountPath: /etc/machine-id + name: machine-id + readOnly: true + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 8 }} + {{- end }} + serviceAccountName: "{{ .Release.Name }}-rke2-journald-aggregator" + volumes: + - name: config + configMap: + name: "{{ .Release.Name }}-rke2" + - name: journal + hostPath: + path: {{ .Values.systemdLogPath | default "/var/log/journal" }} + - name: machine-id + hostPath: + path: /etc/machine-id +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "{{ .Release.Name }}-rke2-journald-aggregator" + namespace: "{{ .Release.Namespace }}" +{{- if .Values.global.psp.enabled }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: "{{ .Release.Name }}-rke2-journald-aggregator" +rules: + - apiGroups: + - policy + resourceNames: + - "{{ .Release.Name }}-rke2-journald-aggregator" + resources: + - podsecuritypolicies + verbs: + - use +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: "{{ .Release.Name }}-rke2-journald-aggregator" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: "{{ .Release.Name }}-rke2-journald-aggregator" +subjects: + - kind: ServiceAccount + name: "{{ .Release.Name }}-rke2-journald-aggregator" +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: "{{ .Release.Name }}-rke2-journald-aggregator" + namespace: "{{ .Release.Namespace }}" +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: RunAsAny + readOnlyRootFilesystem: true + runAsUser: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - configMap + - emptyDir + - secret + - hostPath +{{- end }} +{{- end }} diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke2/logging-rke2-containers.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke2/logging-rke2-containers.yaml new file mode 100755 index 000000000..7be4972e7 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/rke2/logging-rke2-containers.yaml @@ -0,0 +1,73 @@ +{{- if 
.Values.additionalLoggingSources.rke2.enabled }} +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Logging +metadata: + name: {{ .Release.Name }}-rke2-containers + namespace: {{ .Release.Namespace }} +spec: + controlNamespace: {{ .Release.Namespace }} + fluentbit: + image: + repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }} + tag: {{ .Values.images.fluentbit.tag }} + inputTail: + Tag: "rke2" + Path: "/var/log/containers/*rke*.log" + extraVolumeMounts: + - source: "/var/log/containers/" + destination: "/var/log/containers/" + readOnly: true + {{- if or .Values.global.psp.enabled .Values.global.seLinux.enabled }} + security: + {{- end }} + {{- if or .Values.global.psp.enabled }} + podSecurityPolicyCreate: true + roleBasedAccessControlCreate: true + {{- end }} + {{- if .Values.global.seLinux.enabled }} + securityContext: + seLinuxOptions: + type: rke_logreader_t + {{- end }} + {{- $total_tolerations := concat (.Values.tolerations) (.Values.fluentbit.tolerations) }} + {{- with $total_tolerations }} + tolerations: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.fluentbit.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + fluentd: + image: + repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }} + tag: {{ .Values.images.fluentd.tag }} + configReloaderImage: + repository: {{ template "system_default_registry" . }}{{ .Values.images.config_reloader.repository }} + tag: {{ .Values.images.config_reloader.tag }} + disablePvc: {{ .Values.disablePvc }} + {{- if .Values.global.psp.enabled }} + security: + podSecurityPolicyCreate: true + roleBasedAccessControlCreate: true + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- with .Values.fluentd.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.fluentd.livenessProbe }} + livenessProbe: {{- toYaml . | nindent 6 }} + {{- end }} +{{- end }} diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/root/logging.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/root/logging.yaml new file mode 100755 index 000000000..ca85a4c94 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/loggings/root/logging.yaml @@ -0,0 +1,111 @@ +{{- $containers := printf "%s/containers/" (default "/var/lib/docker" .Values.global.dockerRootDirectory) }} +apiVersion: logging.banzaicloud.io/v1beta1 +kind: Logging +metadata: + name: {{ .Release.Name }} + namespace: {{ .Release.Namespace }} + labels: +{{ include "logging-operator.labels" . | indent 4 }} +spec: + controlNamespace: {{ .Release.Namespace }} + {{- if (include "windowsEnabled" .) }} + nodeAgents: + - name: win-agent + profile: windows + nodeAgentFluentbit: + daemonSet: + spec: + template: + spec: + containers: + - image: "{{ template "system_default_registry" . }}{{ .Values.images.nodeagent_fluentbit.repository }}:{{ .Values.images.nodeagent_fluentbit.tag }}" + name: fluent-bit + tls: + enabled: {{ .Values.nodeAgents.tls.enabled | default false }} + {{- if .Values.additionalLoggingSources.rke.enabled }} + - name: win-agent-rke + profile: windows + nodeAgentFluentbit: + filterKubernetes: + Kube_Tag_Prefix: "kuberentes.C.var.lib.rancher.rke.log." + inputTail: + Path: "C:\\var\\lib\\rancher\\rke\\log" + extraVolumeMounts: + - source: "/var/lib/rancher/rke/log" + destination: "/var/lib/rancher/rke/log" + readOnly: true + daemonSet: + spec: + template: + spec: + containers: + - image: "{{ template "system_default_registry" . 
}}{{ .Values.images.nodeagent_fluentbit.repository }}:{{ .Values.images.nodeagent_fluentbit.tag }}" + name: fluent-bit + tls: + enabled: {{ .Values.nodeAgents.tls.enabled | default false }} + {{- end }} + {{- end }} + fluentbit: + image: + repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentbit.repository }} + tag: {{ .Values.images.fluentbit.tag }} + {{- if or .Values.global.psp.enabled .Values.global.seLinux.enabled }} + security: + {{- end }} + {{- if .Values.global.psp.enabled }} + podSecurityPolicyCreate: true + roleBasedAccessControlCreate: true + {{- end }} + {{- if .Values.global.seLinux.enabled }} + securityContext: + seLinuxOptions: + type: rke_logreader_t + {{- end }} + {{- if .Values.global.dockerRootDirectory }} + mountPath: {{ $containers }} + extraVolumeMounts: + - source: {{ $containers }} + destination: {{ $containers }} + readOnly: true + {{- end }} + {{- $total_tolerations := concat (.Values.tolerations) (.Values.fluentbit.tolerations) }} + {{- with $total_tolerations }} + tolerations: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.fluentbit.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + fluentd: + image: + repository: {{ template "system_default_registry" . }}{{ .Values.images.fluentd.repository }} + tag: {{ .Values.images.fluentd.tag }} + configReloaderImage: + repository: {{ template "system_default_registry" . }}{{ .Values.images.config_reloader.repository }} + tag: {{ .Values.images.config_reloader.tag }} + disablePvc: {{ .Values.disablePvc }} + {{- if .Values.global.psp.enabled }} + security: + podSecurityPolicyCreate: true + roleBasedAccessControlCreate: true + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- with .Values.fluentd.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.fluentd.livenessProbe }} + livenessProbe: {{- toYaml . | nindent 6 }} + {{- end }} diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/psp.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/psp.yaml new file mode 100755 index 000000000..420067f59 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/psp.yaml @@ -0,0 +1,33 @@ +{{ if and .Values.rbac.enabled .Values.rbac.psp.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: psp.logging-operator + namespace: {{ include "logging-operator.namespace" . }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' + labels: +{{ include "logging-operator.labels" . | indent 4 }} +spec: + readOnlyRootFilesystem: true + privileged: false + allowPrivilegeEscalation: false + runAsUser: + rule: MustRunAsNonRoot + fsGroup: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + supplementalGroups: + rule: MustRunAs + ranges: + - min: 1 + max: 65535 + seLinux: + rule: RunAsAny + volumes: + - secret + - configMap +{{ end }} diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/service.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/service.yaml new file mode 100755 index 000000000..f419ae2c4 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/service.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "logging-operator.fullname" . }} + namespace: {{ include "logging-operator.namespace" . }} + labels: +{{ include "logging-operator.labels" . | indent 4 }} +spec: + type: ClusterIP + {{- with .Values.http.service.clusterIP }} + clusterIP: {{ . 
}} + {{- end }} + ports: + - port: {{ .Values.http.port }} + targetPort: http + protocol: TCP + name: http + selector: + app.kubernetes.io/name: {{ include "logging-operator.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/serviceMonitor.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/serviceMonitor.yaml new file mode 100755 index 000000000..1bb762cde --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/serviceMonitor.yaml @@ -0,0 +1,30 @@ +{{ if .Values.monitoring.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "logging-operator.fullname" . }} + namespace: {{ include "logging-operator.namespace" . }} + labels: +{{ include "logging-operator.labels" . | indent 4 }} +{{- with .Values.monitoring.serviceMonitor.additionalLabels }} + {{- toYaml . | nindent 4 }} +{{- end }} +spec: + selector: + matchLabels: +{{ include "logging-operator.labels" . | indent 6 }} + endpoints: + - port: http + path: /metrics + {{- with .Values.monitoring.serviceMonitor.metricsRelabelings }} + metricRelabelings: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.monitoring.serviceMonitor.relabelings }} + relabelings: + {{- toYaml . | nindent 4 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "logging-operator.namespace" . }} +{{- end }} diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/serviceaccount.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/serviceaccount.yaml new file mode 100755 index 000000000..cbb2a94b4 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/serviceaccount.yaml @@ -0,0 +1,10 @@ +{{- if .Values.rbac.enabled }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "logging-operator.fullname" . }} + namespace: {{ include "logging-operator.namespace" . 
}} + labels: +{{ include "logging-operator.labels" . | indent 4 }} +{{- end }} diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/userroles.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/userroles.yaml new file mode 100755 index 000000000..f4136b09a --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/userroles.yaml @@ -0,0 +1,35 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: "logging-admin" + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: + - "logging.banzaicloud.io" + resources: + - flows + - outputs + verbs: + - "*" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: "logging-view" + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" +rules: + - apiGroups: + - "logging.banzaicloud.io" + resources: + - flows + - outputs + - clusterflows + - clusteroutputs + verbs: + - get + - list + - watch diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/validate-install-crd.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/validate-install-crd.yaml new file mode 100755 index 000000000..66e8725e5 --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/validate-install-crd.yaml @@ -0,0 +1,18 @@ +#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}} +# {{- $found := dict -}} +# {{- set $found "logging.banzaicloud.io/v1beta1/ClusterFlow" false -}} +# {{- set $found "logging.banzaicloud.io/v1beta1/ClusterOutput" false -}} +# {{- set $found "logging.banzaicloud.io/v1beta1/Flow" false -}} +# {{- set $found "logging.banzaicloud.io/v1beta1/Logging" false -}} +# {{- set $found "logging.banzaicloud.io/v1beta1/Output" false -}} +# {{- range .Capabilities.APIVersions -}} +# {{- if hasKey $found (toString .) 
-}} +# {{- set $found (toString .) true -}} +# {{- end -}} +# {{- end -}} +# {{- range $_, $exists := $found -}} +# {{- if (eq $exists false) -}} +# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}} +# {{- end -}} +# {{- end -}} +#{{- end -}} \ No newline at end of file diff --git a/charts/rancher-logging/rancher-logging/3.9.400/templates/validate-install.yaml b/charts/rancher-logging/rancher-logging/3.9.400/templates/validate-install.yaml new file mode 100755 index 000000000..bd624cc4b --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/templates/validate-install.yaml @@ -0,0 +1,5 @@ +#{{- if .Values.global.dockerRootDirectory }} +#{{- if or (hasSuffix "/containers" .Values.global.dockerRootDirectory) (hasSuffix "/" .Values.global.dockerRootDirectory) }} +#{{- required "global.dockerRootDirectory must not end with suffix: '/' or '/containers'" "" -}} +#{{- end }} +#{{- end }} diff --git a/charts/rancher-logging/rancher-logging/3.9.400/values.yaml b/charts/rancher-logging/rancher-logging/3.9.400/values.yaml new file mode 100755 index 000000000..a1e53937d --- /dev/null +++ b/charts/rancher-logging/rancher-logging/3.9.400/values.yaml @@ -0,0 +1,176 @@ +# Default values for logging-operator. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rancher/mirrored-banzaicloud-logging-operator + tag: 3.9.4 + pullPolicy: IfNotPresent + +extraArgs: [] +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" +namespaceOverride: "" + +## Pod custom labels +## +podLabels: {} + +annotations: {} + +## Deploy CRDs used by Logging Operator. +## +createCustomResource: false + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: + kubernetes.io/os: linux + +tolerations: + - key: cattle.io/os + operator: "Equal" + value: "linux" + effect: NoSchedule + +affinity: {} + +http: + # http listen port number + port: 8080 + # Service definition for query http service + service: + type: ClusterIP + clusterIP: None + # Annotations to query http service + annotations: {} + # Labels to query http service + labels: {} + +# These "rbac" settings match the upstream defaults. For only using psp in the overlay files, which +# include the default Logging CRs created, see the "global.psp" setting. To enable psp for the entire +# chart, enable both "rbac.psp" and "global.psp" (this may require further changes to the chart). +rbac: + enabled: true + psp: + enabled: false + +## SecurityContext holds pod-level security attributes and common container settings. +## This defaults to non root user with uid 1000 and gid 2000. 
*v1.PodSecurityContext false +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +podSecurityContext: {} +# runAsNonRoot: true +# runAsUser: 1000 +# fsGroup: 2000 +securityContext: {} +# allowPrivilegeEscalation: false +# readOnlyRootFilesystem: true + # capabilities: + # drop: ["ALL"] + +## Operator priorityClassName +## +priorityClassName: {} + +monitoring: + # Create a Prometheus Operator ServiceMonitor object + serviceMonitor: + enabled: false + additionalLabels: {} + metricRelabelings: [] + relabelings: [] + +disablePvc: true + +systemdLogPath: "/run/log/journal" + +additionalLoggingSources: + rke: + enabled: false + fluentbit: + log_level: "info" + mem_buffer_limit: "5MB" + rke2: + enabled: false + k3s: + enabled: false + container_engine: "systemd" + aks: + enabled: false + eks: + enabled: false + gke: + enabled: false + +images: + config_reloader: + repository: rancher/mirrored-jimmidyson-configmap-reload + tag: v0.4.0 + fluentbit: + repository: rancher/mirrored-fluent-fluent-bit + tag: 1.6.10 + fluentbit_debug: + repository: rancher/mirrored-fluent-fluent-bit + tag: 1.6.10-debug + fluentd: + repository: rancher/mirrored-banzaicloud-fluentd + tag: v1.11.5-alpine-12 + nodeagent_fluentbit: + os: "windows,linux" + repository: rancher/fluent-bit + tag: 1.6.10 + +# These settings apply to every Logging CR, including vendor Logging CRs enabled in "additionalLoggingSources". +# Changing these affects every Logging CR installed. +nodeAgents: + tls: + enabled: false +fluentd: + resources: {} + livenessProbe: + tcpSocket: + port: 24240 + initialDelaySeconds: 30 + periodSeconds: 15 +fluentbit: + resources: {} + tolerations: + - key: node-role.kubernetes.io/controlplane + value: "true" + effect: NoSchedule + - key: node-role.kubernetes.io/etcd + value: "true" + effect: NoExecute + +global: + cattle: + systemDefaultRegistry: "" + # Uncomment the below two lines to either enable or disable Windows logging. 
If this chart is + # installed via the Rancher UI, it will set this value to "true" if the cluster is a Windows + # cluster. In that scenario, if you would like to disable Windows logging on Windows clusters, + # set the value below to "false". + # windows: + # enabled: true + # Change the "dockerRootDirectory" if the default Docker directory has changed. + dockerRootDirectory: "" + # This psp setting differs from the upstream "rbac.psp" by only enabling psp settings for the + # overlay files, which include the Logging CRs created, whereas the upstream "rbac.psp" affects the + # logging operator. + psp: + enabled: true + seLinux: + enabled: false diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/Chart.yaml new file mode 100755 index 000000000..2b548e860 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/Chart.yaml @@ -0,0 +1,10 @@ +annotations: + catalog.cattle.io/certified: rancher + catalog.cattle.io/hidden: "true" + catalog.cattle.io/namespace: cattle-monitoring-system + catalog.cattle.io/release-name: rancher-monitoring-crd +apiVersion: v1 +description: Installs the CRDs for rancher-monitoring. +name: rancher-monitoring-crd +type: application +version: 14.5.100 diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/README.md b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/README.md new file mode 100755 index 000000000..e0b63e026 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/README.md @@ -0,0 +1,24 @@ +# rancher-monitoring-crd +A Rancher chart that installs the CRDs used by rancher-monitoring. + +## How does this chart work? + +This chart marshalls all of the CRD files placed in the `crd-manifest` directory into a ConfigMap that is installed onto a cluster alongside relevant RBAC (ServiceAccount, ClusterRoleBinding, ClusterRole, and PodSecurityPolicy). 
+
+Once the relevant dependent resources are installed / upgraded / rolled back, this chart executes a post-install / post-upgrade / post-rollback Job that:
+- Patches any existing versions of the CRDs contained within the `crd-manifest` on the cluster to set `spec.preserveUnknownFields=false`; this step is required since, based on [Kubernetes docs](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning) and a [known workaround](https://github.com/kubernetes-sigs/controller-tools/issues/476#issuecomment-691519936), such CRDs cannot be upgraded normally from `apiextensions.k8s.io/v1beta1` to `apiextensions.k8s.io/v1`.
+- Runs a `kubectl apply` on the CRDs that are contained within the crd-manifest ConfigMap to upgrade CRDs in the cluster
+
+On an uninstall, this chart executes a separate post-delete Job that:
+- Patches any existing versions of the CRDs contained within `crd-manifest` on the cluster to set `metadata.finalizers=[]`
+- Runs a `kubectl delete` on the CRDs that are contained within the crd-manifest ConfigMap to clean up the CRDs from the cluster
+
+Note: If the relevant CRDs already existed in the cluster at the time of install, this chart will absorb ownership of the lifecycle of those CRDs; therefore, on a `helm uninstall`, those CRDs will also be removed from the cluster alongside this chart.
+
+## Why can't we just place the CRDs in the templates/ directory of the main chart?
+
+In Helm today, you cannot declare a CRD and declare a resource of that CRD's kind in templates/ without encountering a failure on render.
+
+## [Helm 3] Why can't we just place the CRDs in the crds/ directory of the main chart?
+
+The Helm 3 `crds/` directory only supports the installation of CRDs, but does not support the upgrade and removal of CRDs, unlike what this chart facilitates.
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-alertmanagerconfigs.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-alertmanagerconfigs.yaml new file mode 100755 index 000000000..b2ed16186 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-alertmanagerconfigs.yaml @@ -0,0 +1,1869 @@ +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + creationTimestamp: null + name: alertmanagerconfigs.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: AlertmanagerConfig + listKind: AlertmanagerConfigList + plural: alertmanagerconfigs + singular: alertmanagerconfig + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: AlertmanagerConfig defines a namespaced AlertmanagerConfig to be aggregated across multiple namespaces configuring one Alertmanager cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AlertmanagerConfigSpec is a specification of the desired behavior of the Alertmanager configuration. By definition, the Alertmanager configuration only applies to alerts for which the `namespace` label is equal to the namespace of the AlertmanagerConfig resource. + properties: + inhibitRules: + description: List of inhibition rules. The rules will only apply to alerts matching the resource’s namespace. + items: + description: InhibitRule defines an inhibition rule that allows to mute alerts when other alerts are already firing. See https://prometheus.io/docs/alerting/latest/configuration/#inhibit_rule + properties: + equal: + description: Labels that must have an equal value in the source and target alert for the inhibition to take effect. + items: + type: string + type: array + sourceMatch: + description: Matchers for which one or more alerts have to exist for the inhibition to take effect. The operator enforces that the alert matches the resource’s namespace. + items: + description: Matcher defines how to match on alert's labels. + properties: + name: + description: Label to match. + minLength: 1 + type: string + regex: + description: Whether to match on equality (false) or regular-expression (true). + type: boolean + value: + description: Label value to match. + type: string + required: + - name + type: object + type: array + targetMatch: + description: Matchers that have to be fulfilled in the alerts to be muted. The operator enforces that the alert matches the resource’s namespace. + items: + description: Matcher defines how to match on alert's labels. + properties: + name: + description: Label to match. + minLength: 1 + type: string + regex: + description: Whether to match on equality (false) or regular-expression (true). + type: boolean + value: + description: Label value to match. 
+ type: string + required: + - name + type: object + type: array + type: object + type: array + receivers: + description: List of receivers. + items: + description: Receiver defines one or more notification integrations. + properties: + emailConfigs: + description: List of Email configurations. + items: + description: EmailConfig configures notifications via Email. + properties: + authIdentity: + description: The identity to use for authentication. + type: string + authPassword: + description: The secret's key that contains the password to use for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + authSecret: + description: The secret's key that contains the CRAM-MD5 secret. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + authUsername: + description: The username to use for authentication. + type: string + from: + description: The sender address. 
+ type: string + headers: + description: Further headers email header key/value pairs. Overrides any headers previously set by the notification implementation. + items: + description: KeyValue defines a (key, value) tuple. + properties: + key: + description: Key of the tuple. + minLength: 1 + type: string + value: + description: Value of the tuple. + type: string + required: + - key + - value + type: object + type: array + hello: + description: The hostname to identify to the SMTP server. + type: string + html: + description: The HTML body of the email notification. + type: string + requireTLS: + description: The SMTP TLS requirement. Note that Go does not support unencrypted connections to remote SMTP endpoints. + type: boolean + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + smarthost: + description: The SMTP host through which emails are sent. + type: string + text: + description: The text body of the email notification. + type: string + tlsConfig: + description: TLS configuration + properties: + ca: + description: Struct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. 
apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert file for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + to: + description: The email address to send notifications to. + type: string + type: object + type: array + name: + description: Name of the receiver. Must be unique across all items from the list. + minLength: 1 + type: string + opsgenieConfigs: + description: List of OpsGenie configurations. + items: + description: OpsGenieConfig configures notifications via OpsGenie. See https://prometheus.io/docs/alerting/latest/configuration/#opsgenie_config + properties: + apiKey: + description: The secret's key that contains the OpsGenie API key. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + apiURL: + description: The URL to send OpsGenie API requests to. + type: string + description: + description: Description of the incident. + type: string + details: + description: A set of arbitrary key/value pairs that provide further detail about the incident. + items: + description: KeyValue defines a (key, value) tuple. + properties: + key: + description: Key of the tuple. + minLength: 1 + type: string + value: + description: Value of the tuple. + type: string + required: + - key + - value + type: object + type: array + httpConfig: + description: HTTP client configuration. 
+ properties: + basicAuth: + description: BasicAuth for the client. + properties: + password: + description: The secret in the service monitor namespace that contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + proxyURL: + description: Optional proxy URL. 
+ type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Struct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert file for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + message: + description: Alert text limited to 130 characters. + type: string + note: + description: Additional alert note. + type: string + priority: + description: Priority level of alert. Possible values are P1, P2, P3, P4, and P5. + type: string + responders: + description: List of responders responsible for notifications. + items: + description: OpsGenieConfigResponder defines a responder to an incident. One of `id`, `name` or `username` has to be defined. + properties: + id: + description: ID of the responder. + type: string + name: + description: Name of the responder. + type: string + type: + description: Type of responder. + minLength: 1 + type: string + username: + description: Username of the responder. + type: string + required: + - type + type: object + type: array + sendResolved: + description: Whether or not to notify about resolved alerts. 
+ type: boolean + source: + description: Backlink to the sender of the notification. + type: string + tags: + description: Comma separated list of tags attached to the notifications. + type: string + type: object + type: array + pagerdutyConfigs: + description: List of PagerDuty configurations. + items: + description: PagerDutyConfig configures notifications via PagerDuty. See https://prometheus.io/docs/alerting/latest/configuration/#pagerduty_config + properties: + class: + description: The class/type of the event. + type: string + client: + description: Client identification. + type: string + clientURL: + description: Backlink to the sender of notification. + type: string + component: + description: The part or component of the affected system that is broken. + type: string + description: + description: Description of the incident. + type: string + details: + description: Arbitrary key/value pairs that provide further detail about the incident. + items: + description: KeyValue defines a (key, value) tuple. + properties: + key: + description: Key of the tuple. + minLength: 1 + type: string + value: + description: Value of the tuple. + type: string + required: + - key + - value + type: object + type: array + group: + description: A cluster or grouping of sources. + type: string + httpConfig: + description: HTTP client configuration. + properties: + basicAuth: + description: BasicAuth for the client. + properties: + password: + description: The secret in the service monitor namespace that contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Struct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. 
apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert file for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. 
+ type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + routingKey: + description: The secret's key that contains the PagerDuty integration key (when using Events API v2). Either this field or `serviceKey` needs to be defined. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + serviceKey: + description: The secret's key that contains the PagerDuty service key (when using integration type "Prometheus"). Either this field or `routingKey` needs to be defined. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + severity: + description: Severity of the incident. + type: string + url: + description: The URL to send requests to. + type: string + type: object + type: array + pushoverConfigs: + description: List of Pushover configurations. + items: + description: PushoverConfig configures notifications via Pushover. See https://prometheus.io/docs/alerting/latest/configuration/#pushover_config + properties: + expire: + description: How long your notification will continue to be retried for, unless the user acknowledges the notification. + type: string + html: + description: Whether notification message is HTML or plain text. + type: boolean + httpConfig: + description: HTTP client configuration. + properties: + basicAuth: + description: BasicAuth for the client. + properties: + password: + description: The secret in the service monitor namespace that contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Struct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert file for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + message: + description: Notification message. + type: string + priority: + description: Priority, see https://pushover.net/api#priority + type: string + retry: + description: How often the Pushover servers will send the same notification to the user. Must be at least 30 seconds. + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + sound: + description: The name of one of the sounds supported by device clients to override the user's default sound choice + type: string + title: + description: Notification title. + type: string + token: + description: The secret's key that contains the registered application’s API token, see https://pushover.net/apps. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + url: + description: A supplementary URL shown alongside the message. + type: string + urlTitle: + description: A title for supplementary URL, otherwise just the URL is shown + type: string + userKey: + description: The secret's key that contains the recipient user’s user key. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. 
+ properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + type: array + slackConfigs: + description: List of Slack configurations. + items: + description: SlackConfig configures notifications via Slack. See https://prometheus.io/docs/alerting/latest/configuration/#slack_config + properties: + actions: + description: A list of Slack actions that are sent with each notification. + items: + description: SlackAction configures a single Slack action that is sent with each notification. See https://api.slack.com/docs/message-attachments#action_fields and https://api.slack.com/docs/message-buttons for more information. + properties: + confirm: + description: SlackConfirmationField protect users from destructive actions or particularly distinguished decisions by asking them to confirm their button click one more time. See https://api.slack.com/docs/interactive-message-field-guide#confirmation_fields for more information. + properties: + dismissText: + type: string + okText: + type: string + text: + minLength: 1 + type: string + title: + type: string + required: + - text + type: object + name: + type: string + style: + type: string + text: + minLength: 1 + type: string + type: + minLength: 1 + type: string + url: + type: string + value: + type: string + required: + - text + - type + type: object + type: array + apiURL: + description: The secret's key that contains the Slack webhook URL. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. 
+ properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + callbackId: + type: string + channel: + description: The channel or user to send notifications to. + type: string + color: + type: string + fallback: + type: string + fields: + description: A list of Slack fields that are sent with each notification. + items: + description: SlackField configures a single Slack field that is sent with each notification. Each field must contain a title, value, and optionally, a boolean value to indicate if the field is short enough to be displayed next to other fields designated as short. See https://api.slack.com/docs/message-attachments#fields for more information. + properties: + short: + type: boolean + title: + minLength: 1 + type: string + value: + minLength: 1 + type: string + required: + - title + - value + type: object + type: array + footer: + type: string + httpConfig: + description: HTTP client configuration. + properties: + basicAuth: + description: BasicAuth for the client. + properties: + password: + description: The secret in the service monitor namespace that contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Struct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. 
apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert file for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. 
+ type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + iconEmoji: + type: string + iconURL: + type: string + imageURL: + type: string + linkNames: + type: boolean + mrkdwnIn: + items: + type: string + type: array + pretext: + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + shortFields: + type: boolean + text: + type: string + thumbURL: + type: string + title: + type: string + titleLink: + type: string + username: + type: string + type: object + type: array + victoropsConfigs: + description: List of VictorOps configurations. + items: + description: VictorOpsConfig configures notifications via VictorOps. See https://prometheus.io/docs/alerting/latest/configuration/#victorops_config + properties: + apiKey: + description: The secret's key that contains the API key to use when talking to the VictorOps API. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + apiUrl: + description: The VictorOps API URL. + type: string + customFields: + description: Additional custom fields for notification. + items: + description: KeyValue defines a (key, value) tuple. + properties: + key: + description: Key of the tuple. + minLength: 1 + type: string + value: + description: Value of the tuple. + type: string + required: + - key + - value + type: object + type: array + entityDisplayName: + description: Contains summary of the alerted problem. + type: string + httpConfig: + description: The HTTP client's configuration. + properties: + basicAuth: + description: BasicAuth for the client. + properties: + password: + description: The secret in the service monitor namespace that contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Struct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert file for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. 
+ type: string + type: object + type: object + messageType: + description: Describes the behavior of the alert (CRITICAL, WARNING, INFO). + type: string + monitoringTool: + description: The monitoring tool the state message is from. + type: string + routingKey: + description: A key used to map the alert to a team. + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + stateMessage: + description: Contains long explanation of the alerted problem. + type: string + type: object + type: array + webhookConfigs: + description: List of webhook configurations. + items: + description: WebhookConfig configures notifications via a generic receiver supporting the webhook payload. See https://prometheus.io/docs/alerting/latest/configuration/#webhook_config + properties: + httpConfig: + description: HTTP client configuration. + properties: + basicAuth: + description: BasicAuth for the client. + properties: + password: + description: The secret in the service monitor namespace that contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Struct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert file for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. 
+ type: string + type: object + type: object + maxAlerts: + description: Maximum number of alerts to be sent per webhook message. When 0, all alerts are included. + format: int32 + minimum: 0 + type: integer + sendResolved: + description: Whether or not to notify about resolved alerts. + type: boolean + url: + description: The URL to send HTTP POST requests to. `urlSecret` takes precedence over `url`. One of `urlSecret` and `url` should be defined. + type: string + urlSecret: + description: The secret's key that contains the webhook URL to send HTTP requests to. `urlSecret` takes precedence over `url`. One of `urlSecret` and `url` should be defined. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + type: array + wechatConfigs: + description: List of WeChat configurations. + items: + description: WeChatConfig configures notifications via WeChat. See https://prometheus.io/docs/alerting/latest/configuration/#wechat_config + properties: + agentID: + type: string + apiSecret: + description: The secret's key that contains the WeChat API key. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. 
apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + apiURL: + description: The WeChat API URL. + type: string + corpID: + description: The corp id for authentication. + type: string + httpConfig: + description: HTTP client configuration. + properties: + basicAuth: + description: BasicAuth for the client. + properties: + password: + description: The secret in the service monitor namespace that contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenSecret: + description: The secret's key that contains the bearer token to be used by the client for authentication. The secret needs to be in the same namespace as the AlertmanagerConfig object and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + proxyURL: + description: Optional proxy URL. + type: string + tlsConfig: + description: TLS configuration for the client. + properties: + ca: + description: Struct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert file for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + message: + description: API request data as defined by the WeChat API. + type: string + messageType: + type: string + sendResolved: + description: Whether or not to notify about resolved alerts. 
+ type: boolean + toParty: + type: string + toTag: + type: string + toUser: + type: string + type: object + type: array + required: + - name + type: object + type: array + route: + description: The Alertmanager route definition for alerts matching the resource’s namespace. If present, it will be added to the generated Alertmanager configuration as a first-level route. + properties: + continue: + description: Boolean indicating whether an alert should continue matching subsequent sibling nodes. It will always be overridden to true for the first-level route by the Prometheus operator. + type: boolean + groupBy: + description: List of labels to group by. + items: + type: string + type: array + groupInterval: + description: How long to wait before sending an updated notification. Must match the regular expression `[0-9]+(ms|s|m|h)` (milliseconds seconds minutes hours). + type: string + groupWait: + description: How long to wait before sending the initial notification. Must match the regular expression `[0-9]+(ms|s|m|h)` (milliseconds seconds minutes hours). + type: string + matchers: + description: 'List of matchers that the alert’s labels should match. For the first level route, the operator removes any existing equality and regexp matcher on the `namespace` label and adds a `namespace: ` matcher.' + items: + description: Matcher defines how to match on alert's labels. + properties: + name: + description: Label to match. + minLength: 1 + type: string + regex: + description: Whether to match on equality (false) or regular-expression (true). + type: boolean + value: + description: Label value to match. + type: string + required: + - name + type: object + type: array + receiver: + description: Name of the receiver for this route. If not empty, it should be listed in the `receivers` field. + type: string + repeatInterval: + description: How long to wait before repeating the last notification. 
Must match the regular expression `[0-9]+(ms|s|m|h)` (milliseconds seconds minutes hours). + type: string + routes: + description: Child routes. + items: + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object + required: + - spec + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-alertmanagers.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-alertmanagers.yaml new file mode 100755 index 000000000..724d488b0 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-alertmanagers.yaml @@ -0,0 +1,3218 @@ +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + creationTimestamp: null + name: alertmanagers.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: Alertmanager + listKind: AlertmanagerList + plural: alertmanagers + singular: alertmanager + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The version of Alertmanager + jsonPath: .spec.version + name: Version + type: string + - description: The desired replicas number of Alertmanagers + jsonPath: .spec.replicas + name: Replicas + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: Alertmanager describes an Alertmanager cluster. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the desired behavior of the Alertmanager cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + additionalPeers: + description: AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. + items: + type: string + type: array + affinity: + description: If specified, the pod's scheduling constraints. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). 
A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + alertmanagerConfigNamespaceSelector: + description: Namespaces to be selected for AlertmanagerConfig discovery. If nil, only check own namespace. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + alertmanagerConfigSelector: + description: AlertmanagerConfigs to be selected for to merge and configure Alertmanager with. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + baseImage: + description: 'Base image that is used to deploy pods, without tag. Deprecated: use ''image'' instead' + type: string + clusterAdvertiseAddress: + description: 'ClusterAdvertiseAddress is the explicit address to advertise in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. 
[1] RFC1918: https://tools.ietf.org/html/rfc1918' + type: string + clusterGossipInterval: + description: Interval between gossip attempts. + type: string + clusterPeerTimeout: + description: Timeout for cluster peering. + type: string + clusterPushpullInterval: + description: Interval between pushpull attempts. + type: string + configMaps: + description: ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. The ConfigMaps are mounted into /etc/alertmanager/configmaps/. + items: + type: string + type: array + configSecret: + description: ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for this Alertmanager instance. Defaults to 'alertmanager-' The secret is mounted into /etc/alertmanager/config. + type: string + containers: + description: 'Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod. Containers described here modify an operator generated container if they share the same name and modifications are done via a strategic merge patch. The current container names are: `alertmanager` and `config-reloader`. Overriding containers is entirely outside the scope of what the maintainers will support and by doing so, you accept that this behaviour may break at any time without notice.' + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. 
Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The docker image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. 
One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod''s termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. 
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. 
TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. 
Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. 
Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. 
+ type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. 
+ properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. 
If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. 
+ type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. 
If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. 
+ items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. 
+ type: string + required: + - name + type: object + type: array + externalUrl: + description: The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name. + type: string + forceEnableClusterMode: + description: ForceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica. Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each. + type: boolean + image: + description: Image if specified has precedence over baseImage, tag and sha combinations. Specifying the version is still necessary to ensure the Prometheus Operator knows what version of Alertmanager is being configured. + type: string + imagePullSecrets: + description: An optional list of references to secrets in the same namespace to use for pulling prometheus and alertmanager images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + items: + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + initContainers: + description: 'InitContainers allows adding initContainers to the pod definition. Those can be used to e.g. fetch secrets for injection into the Alertmanager configuration from external sources. Any errors during the execution of an initContainer will lead to a restart of the Pod. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ Using initContainers for any use case other then secret fetching is entirely outside the scope of what the maintainers will support and by doing so, you accept that this behaviour may break at any time without notice.' + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The docker image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. 
+ type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod''s termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. 
Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. 
If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. 
HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. 
This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. 
Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. 
FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. 
Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + listenLocal: + description: ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP. Note this is only for the Alertmanager UI, not the gossip communication. + type: boolean + logFormat: + description: Log format for Alertmanager to be configured with. + type: string + logLevel: + description: Log level for Alertmanager to be configured with. + type: string + nodeSelector: + additionalProperties: + type: string + description: Define which Nodes the Pods are scheduled on. + type: object + paused: + description: If set to true all actions on the underlying managed objects are not goint to be performed, except for delete actions. + type: boolean + podMetadata: + description: PodMetadata configures Labels and Annotations which are propagated to the alertmanager pods. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. 
More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + type: object + portName: + description: Port name used for the pods and governing service. This defaults to web + type: string + priorityClassName: + description: Priority class assigned to the Pods + type: string + replicas: + description: Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the running cluster equal to the expected size. + format: int32 + type: integer + resources: + description: Define resources requests and limits for single Pods. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + retention: + description: Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression `[0-9]+(ms|s|m|h)` (milliseconds seconds minutes hours). + type: string + routePrefix: + description: The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true, but the server serves requests under a different route prefix. For example for use with `kubectl proxy`. + type: string + secrets: + description: Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/. + items: + type: string + type: array + securityContext: + description: SecurityContext holds pod-level security attributes and common container settings. This defaults to the default PodSecurityContext. + properties: + fsGroup: + description: "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: \n 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- \n If unset, the Kubelet will not modify the ownership and permissions of any volume." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". 
If not specified defaults to "Always".' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. 
+ type: string + type: object + supplementalGroups: + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccountName: + description: ServiceAccountName is the name of the ServiceAccount to use to run the Prometheus Pods. + type: string + sha: + description: 'SHA of Alertmanager container image to be deployed. 
Defaults to the value of `version`. Similar to a tag, but the SHA explicitly deploys an immutable container image. Version and Tag are ignored if SHA is set. Deprecated: use ''image'' instead. The image digest can be specified as part of the image URL.' + type: string + storage: + description: Storage is the definition of how storage will be used by the Alertmanager instances. + properties: + disableMountSubPath: + description: 'Deprecated: subPath usage will be disabled by default in a future release, this option will become unnecessary. DisableMountSubPath allows to remove any subPath usage in volume mounts.' + type: boolean + emptyDir: + description: 'EmptyDirVolumeSource to be used by the Prometheus StatefulSets. If specified, used in place of any volumeClaimTemplate. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' + properties: + medium: + description: 'What type of storage medium should back this directory. The default is "" which means to use the node''s default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + volumeClaimTemplate: + description: A PVC spec to be used by the Prometheus StatefulSets. 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: EmbeddedMetadata contains metadata relevant to an EmbeddedResource. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + type: object + spec: + description: 'Spec defines the desired characteristics of a volume requested by a pod author. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) * An existing PVC (PersistentVolumeClaim) * An existing custom resource/object that implements data population (Alpha) In order to use VolumeSnapshot object types, the appropriate feature gate must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the specified data source is not supported, the volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + status: + description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + capacity: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Represents the actual resources of the underlying volume. + type: object + conditions: + description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. 
+ items: + description: PersistentVolumeClaimCondition contails details about state of pvc + properties: + lastProbeTime: + description: Last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + format: date-time + type: string + message: + description: Human-readable message indicating details about last transition. + type: string + reason: + description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. + type: string + status: + type: string + type: + description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type + type: string + required: + - status + - type + type: object + type: array + phase: + description: Phase represents the current phase of PersistentVolumeClaim. + type: string + type: object + type: object + type: object + tag: + description: 'Tag of Alertmanager container image to be deployed. Defaults to the value of `version`. Version is ignored if Tag is set. Deprecated: use ''image'' instead. The image tag can be specified as part of the image URL.' + type: string + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. 
+ type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: If specified, the pod's topology spread constraints. + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. It''s the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It''s considered as "Unsatisfiable" if and only if placing incoming pod on any topology violates "MaxSkew". 
For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + version: + description: Version the cluster should be on. + type: string + volumeMounts: + description: VolumeMounts allows configuration of additional VolumeMounts on the output StatefulSet definition. VolumeMounts specified will be appended to other VolumeMounts in the alertmanager container, that are generated as a result of StorageSpec objects. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. 
Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + volumes: + description: Volumes allows configuration of additional volumes on the output StatefulSet definition. Volumes specified will be appended to other volumes that are generated as a result of StorageSpec objects. + items: + description: Volume represents a named volume in a pod that may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". If omitted, the default is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource in AWS (Amazon EBS volume). 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: the name of secret that contains Azure Storage Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + properties: + monitors: + description: 'Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'Optional: Used as the mounted root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. 
+ items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + csi: + description: CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature). + properties: + driver: + description: Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed. 
+ properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: Specifies a read-only configuration for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this directory. The default is "" which means to use the node''s default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. 
The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + fc: + description: FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use for this volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. 
+ type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the host that shares a pod''s lifetime. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. 
More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). 
+ type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, and downward API + properties: + defaultMode: + description: Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with other supported volume types + properties: + configMap: + description: information about the configMap data to project + properties: + items: + description: If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to project + properties: + items: + description: If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. 
+ type: string + mode: + description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken data to project + properties: + audience: + description: Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the mount point of the file to project the token into. 
+ type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is no group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. + type: boolean + registry: + description: Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to serivceaccount user + type: string + volume: + description: Volume is a string that references an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: Flag to enable/disable SSL communication with Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with the protection domain. + type: string + system: + description: The name of the storage system as configured in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must be defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. 
apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + status: + description: 'Most recent observed status of the Alertmanager cluster. Read-only. Not included when requesting from the apiserver, only from the Prometheus Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + availableReplicas: + description: Total number of available pods (ready for at least minReadySeconds) targeted by this Alertmanager cluster. 
+ format: int32 + type: integer + paused: + description: Represents whether any actions on the underlying managed objects are being performed. Only delete actions will be performed. + type: boolean + replicas: + description: Total number of non-terminated pods targeted by this Alertmanager cluster (their labels match the selector). + format: int32 + type: integer + unavailableReplicas: + description: Total number of unavailable pods targeted by this Alertmanager cluster. + format: int32 + type: integer + updatedReplicas: + description: Total number of non-terminated pods targeted by this Alertmanager cluster that have the desired version spec. + format: int32 + type: integer + required: + - availableReplicas + - paused + - replicas + - unavailableReplicas + - updatedReplicas + type: object + required: + - spec + type: object + served: true + storage: true + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-podmonitors.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-podmonitors.yaml new file mode 100755 index 000000000..d474a0c0d --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-podmonitors.yaml @@ -0,0 +1,358 @@ +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + creationTimestamp: null + name: podmonitors.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: PodMonitor + listKind: PodMonitorList + plural: podmonitors + singular: podmonitor + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: 
PodMonitor defines monitoring for a set of pods. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired Pod selection for target discovery by Prometheus. + properties: + jobLabel: + description: The label to use to retrieve the job name from. + type: string + namespaceSelector: + description: Selector to select which namespaces the Endpoints objects are discovered from. + properties: + any: + description: Boolean describing whether all namespaces are selected in contrast to a list restricting them. + type: boolean + matchNames: + description: List of namespace names. + items: + type: string + type: array + type: object + podMetricsEndpoints: + description: A list of endpoints allowed as part of this PodMonitor. + items: + description: PodMetricsEndpoint defines a scrapeable endpoint of a Kubernetes Pod serving Prometheus metrics. + properties: + basicAuth: + description: 'BasicAuth allow an endpoint to authenticate over basic authentication. More info: https://prometheus.io/docs/operating/configuration/#endpoint' + properties: + password: + description: The secret in the service monitor namespace that contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenSecret: + description: Secret to mount to read bearer token for scraping targets. The secret needs to be in the same namespace as the pod monitor and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + honorLabels: + description: HonorLabels chooses the metric's labels on collisions with target labels. + type: boolean + honorTimestamps: + description: HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data. 
+ type: boolean + interval: + description: Interval at which metrics should be scraped + type: string + metricRelabelings: + description: MetricRelabelConfigs to apply to samples before ingestion. + items: + description: 'RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines ``-section of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source label values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted value is matched. Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written in a replace action. It is mandatory for replace actions. Regex capture groups are available. + type: string + type: object + type: array + params: + additionalProperties: + items: + type: string + type: array + description: Optional HTTP URL parameters + type: object + path: + description: HTTP path to scrape for metrics. + type: string + port: + description: Name of the pod port this endpoint refers to. Mutually exclusive with targetPort. 
+ type: string + proxyUrl: + description: ProxyURL eg http://proxyserver:2195 Directs scrapes to proxy through this endpoint. + type: string + relabelings: + description: 'RelabelConfigs to apply to samples before ingestion. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + items: + description: 'RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines ``-section of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source label values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted value is matched. Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written in a replace action. It is mandatory for replace actions. Regex capture groups are available. + type: string + type: object + type: array + scheme: + description: HTTP scheme to use for scraping. 
+ type: string + scrapeTimeout: + description: Timeout after which the scrape is ended + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: 'Deprecated: Use ''port'' instead.' + x-kubernetes-int-or-string: true + tlsConfig: + description: TLS configuration to use when scraping the endpoint. + properties: + ca: + description: Struct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + cert: + description: Struct containing the client cert file for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + type: array + podTargetLabels: + description: PodTargetLabels transfers labels on the Kubernetes Pod onto the target. + items: + type: string + type: array + sampleLimit: + description: SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + format: int64 + type: integer + selector: + description: Selector to select Pod objects. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + targetLimit: + description: TargetLimit defines a limit on the number of scraped targets that will be accepted. 
+ format: int64 + type: integer + required: + - podMetricsEndpoints + - selector + type: object + required: + - spec + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-probes.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-probes.yaml new file mode 100755 index 000000000..7fd658e14 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-probes.yaml @@ -0,0 +1,202 @@ +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + creationTimestamp: null + name: probes.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: Probe + listKind: ProbeList + plural: probes + singular: probe + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Probe defines monitoring for a set of static targets or ingresses. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired Ingress selection for target discovery by Prometheus. + properties: + interval: + description: Interval at which targets are probed using the configured prober. If not specified Prometheus' global scrape interval is used. + type: string + jobName: + description: The job name assigned to scraped metrics by default. + type: string + module: + description: 'The module to use for probing specifying how to probe the target. Example module configuring in the blackbox exporter: https://github.com/prometheus/blackbox_exporter/blob/master/example.yml' + type: string + prober: + description: Specification for the prober to use for probing targets. The prober.URL parameter is required. Targets cannot be probed if left empty. + properties: + path: + description: Path to collect metrics from. Defaults to `/probe`. + type: string + scheme: + description: HTTP scheme to use for scraping. Defaults to `http`. + type: string + url: + description: Mandatory URL of the prober. + type: string + required: + - url + type: object + scrapeTimeout: + description: Timeout for scraping metrics from the Prometheus exporter. + type: string + targets: + description: Targets defines a set of static and/or dynamically discovered targets to be probed using the prober. + properties: + ingress: + description: Ingress defines the set of dynamically discovered ingress objects which hosts are considered for probing. + properties: + namespaceSelector: + description: Select Ingress objects by namespace. + properties: + any: + description: Boolean describing whether all namespaces are selected in contrast to a list restricting them. + type: boolean + matchNames: + description: List of namespace names. 
+ items: + type: string + type: array + type: object + relabelingConfigs: + description: 'RelabelConfigs to apply to samples before ingestion. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + items: + description: 'RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines ``-section of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source label values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted value is matched. Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written in a replace action. It is mandatory for replace actions. Regex capture groups are available. + type: string + type: object + type: array + selector: + description: Select Ingress objects by labels. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + type: object + staticConfig: + description: 'StaticConfig defines static targets which are considers for probing. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config.' + properties: + labels: + additionalProperties: + type: string + description: Labels assigned to all metrics scraped from the targets. + type: object + relabelingConfigs: + description: 'RelabelConfigs to apply to samples before ingestion. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + items: + description: 'RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines ``-section of Prometheus configuration. 
More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source label values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted value is matched. Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written in a replace action. It is mandatory for replace actions. Regex capture groups are available. + type: string + type: object + type: array + static: + description: Targets is a list of URLs to probe using the configured prober. 
+ items: + type: string + type: array + type: object + type: object + type: object + required: + - spec + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-prometheuses.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-prometheuses.yaml new file mode 100755 index 000000000..c3f13d981 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-prometheuses.yaml @@ -0,0 +1,4432 @@ +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + creationTimestamp: null + name: prometheuses.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: Prometheus + listKind: PrometheusList + plural: prometheuses + singular: prometheus + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The version of Prometheus + jsonPath: .spec.version + name: Version + type: string + - description: The desired replicas number of Prometheuses + jsonPath: .spec.replicas + name: Replicas + type: integer + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: Prometheus defines a Prometheus deployment. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the desired behavior of the Prometheus cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + additionalAlertManagerConfigs: + description: 'AdditionalAlertManagerConfigs allows specifying a key of a Secret containing additional Prometheus AlertManager configurations. AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator. Job configurations specified must have the form as specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config. As AlertManager configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + additionalAlertRelabelConfigs: + description: 'AdditionalAlertRelabelConfigs allows specifying a key of a Secret containing additional Prometheus alert relabel configurations. Alert relabel configurations specified are appended to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. As alert relabel configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel configs are going to break Prometheus after the upgrade.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + additionalScrapeConfigs: + description: 'AdditionalScrapeConfigs allows specifying a key of a Secret containing additional Prometheus scrape configurations. Scrape configurations specified are appended to the configurations generated by the Prometheus Operator. Job configurations specified must have the form as specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are appended, the user is responsible to make sure it is valid. 
Note that using this feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible scrape configs are going to break Prometheus after the upgrade.' + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + affinity: + description: If specified, the pod's scheduling constraints. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. 
+ items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. 
+ properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + alerting: + description: Define details regarding alerting. 
+ properties: + alertmanagers: + description: AlertmanagerEndpoints Prometheus should fire alerts against. + items: + description: AlertmanagerEndpoints defines a selection of a single Endpoints object containing alertmanager IPs to fire alerts against. + properties: + apiVersion: + description: Version of the Alertmanager API that Prometheus uses to send alerts. It can be "v1" or "v2". + type: string + bearerTokenFile: + description: BearerTokenFile to read from filesystem to use when authenticating to Alertmanager. + type: string + name: + description: Name of Endpoints object in Namespace. + type: string + namespace: + description: Namespace of Endpoints object. + type: string + pathPrefix: + description: Prefix for the HTTP path alerts are pushed to. + type: string + port: + anyOf: + - type: integer + - type: string + description: Port the Alertmanager API is exposed on. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use when firing alerts. + type: string + timeout: + description: Timeout is a per-target Alertmanager timeout when pushing alerts. + type: string + tlsConfig: + description: TLS Config to use for alertmanager connection. + properties: + ca: + description: Struct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + caFile: + description: Path to the CA cert in the Prometheus container to use for the targets. + type: string + cert: + description: Struct containing the client cert file for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus container for the targets. 
+ type: string + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + required: + - name + - namespace + - port + type: object + type: array + required: + - alertmanagers + type: object + allowOverlappingBlocks: + description: AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. This is still experimental in Prometheus so it may change in any upcoming release. + type: boolean + apiserverConfig: + description: APIServerConfig allows specifying a host and auth methods to access apiserver. If left empty, Prometheus is assumed to run inside of the cluster and will discover API servers automatically and use the pod's CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/. + properties: + basicAuth: + description: BasicAuth allow an endpoint to authenticate over basic authentication + properties: + password: + description: The secret in the service monitor namespace that contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerToken: + description: Bearer token for accessing apiserver. + type: string + bearerTokenFile: + description: File to read bearer token for accessing apiserver. + type: string + host: + description: Host of apiserver. A valid string consisting of a hostname or IP followed by an optional port number + type: string + tlsConfig: + description: TLS Config to use for accessing apiserver. + properties: + ca: + description: Struct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + caFile: + description: Path to the CA cert in the Prometheus container to use for the targets. + type: string + cert: + description: Struct containing the client cert file for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the targets. 
+ properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + required: + - host + type: object + arbitraryFSAccessThroughSMs: + description: ArbitraryFSAccessThroughSMs configures whether configuration based on a service monitor can access arbitrary files on the file system of the Prometheus container e.g. bearer token files. + properties: + deny: + type: boolean + type: object + baseImage: + description: 'Base image to use for a Prometheus deployment. Deprecated: use ''image'' instead' + type: string + configMaps: + description: ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. The ConfigMaps are mounted into /etc/prometheus/configmaps/. + items: + type: string + type: array + containers: + description: 'Containers allows injecting additional containers or modifying operator generated containers. This can be used to allow adding an authentication proxy to a Prometheus pod or to change the behavior of an operator generated container. Containers described here modify an operator generated container if they share the same name and modifications are done via a strategic merge patch. The current container names are: `prometheus`, `config-reloader`, and `thanos-sidecar`. Overriding containers is entirely outside the scope of what the maintainers will support and by doing so, you accept that this behaviour may break at any time without notice.' 
+ items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The docker image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. 
The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod''s termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. 
Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. 
If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. 
HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. 
This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. 
Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. 
FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. 
Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + disableCompaction: + description: Disable prometheus compaction. + type: boolean + enableAdminAPI: + description: 'Enable access to prometheus web admin API. Defaults to the value of `false`. WARNING: Enabling the admin APIs enables mutating endpoints, to delete data, shutdown Prometheus, and more. Enabling this should be done with care and the user is advised to add additional authentication authorization via a proxy to ensure only clients authorized to perform these actions can do so. For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis' + type: boolean + enforcedNamespaceLabel: + description: EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert and metric that is user created. The label value will always be the namespace of the object that is being created. + type: string + enforcedSampleLimit: + description: EnforcedSampleLimit defines global limit on number of scraped samples that will be accepted. This overrides any SampleLimit set per ServiceMonitor or/and PodMonitor. It is meant to be used by admins to enforce the SampleLimit to keep overall number of samples/series under the desired limit. Note that if SampleLimit is lower that value will be taken instead. + format: int64 + type: integer + enforcedTargetLimit: + description: EnforcedTargetLimit defines a global limit on the number of scraped targets. 
This overrides any TargetLimit set per ServiceMonitor or/and PodMonitor. It is meant to be used by admins to enforce the TargetLimit to keep overall number of targets under the desired limit. Note that if TargetLimit is higher that value will be taken instead. + format: int64 + type: integer + evaluationInterval: + description: Interval between consecutive evaluations. + type: string + externalLabels: + additionalProperties: + type: string + description: The labels to add to any time series or alerts when communicating with external systems (federation, remote storage, Alertmanager). + type: object + externalUrl: + description: The external URL the Prometheus instances will be available under. This is necessary to generate correct URLs. This is necessary if Prometheus is not served from root of a DNS name. + type: string + ignoreNamespaceSelectors: + description: IgnoreNamespaceSelectors if set to true will ignore NamespaceSelector settings from the podmonitor and servicemonitor configs, and they will only discover endpoints within their current namespace. Defaults to false. + type: boolean + image: + description: Image if specified has precedence over baseImage, tag and sha combinations. Specifying the version is still necessary to ensure the Prometheus Operator knows what version of Prometheus is being configured. + type: string + imagePullSecrets: + description: An optional list of references to secrets in the same namespace to use for pulling prometheus and alertmanager images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + items: + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + type: array + initContainers: + description: 'InitContainers allows adding initContainers to the pod definition. Those can be used to e.g. fetch secrets for injection into the Prometheus configuration from external sources. Any errors during the execution of an initContainer will lead to a restart of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ Using initContainers for any use case other then secret fetching is entirely outside the scope of what the maintainers will support and by doing so, you accept that this behaviour may break at any time without notice.' + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The docker image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. 
+ type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. 
+ items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. 
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. 
TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod''s termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. 
+ properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. 
+ properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. 
Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. 
+ type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + listenLocal: + description: ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP. + type: boolean + logFormat: + description: Log format for Prometheus to be configured with. + type: string + logLevel: + description: Log level for Prometheus to be configured with. + type: string + nodeSelector: + additionalProperties: + type: string + description: Define which Nodes the Pods are scheduled on. + type: object + overrideHonorLabels: + description: OverrideHonorLabels if set to true overrides all user configured honor_labels. If HonorLabels is set in ServiceMonitor or PodMonitor to true, this overrides honor_labels to false. + type: boolean + overrideHonorTimestamps: + description: OverrideHonorTimestamps allows to globally enforce honoring timestamps in all scrape configs. + type: boolean + paused: + description: When a Prometheus deployment is paused, no actions except for deletion will be performed on the underlying objects. 
+ type: boolean + podMetadata: + description: PodMetadata configures Labels and Annotations which are propagated to the prometheus pods. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + type: object + podMonitorNamespaceSelector: + description: Namespace's labels to match for PodMonitor discovery. If nil, only check own namespace. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + podMonitorSelector: + description: '*Experimental* PodMonitors to be selected for target discovery. *Deprecated:* if neither this nor serviceMonitorSelector are specified, configuration is unmanaged.' + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + portName: + description: Port name used for the pods and governing service. This defaults to web + type: string + priorityClassName: + description: Priority class assigned to the Pods + type: string + probeNamespaceSelector: + description: '*Experimental* Namespaces to be selected for Probe discovery. If nil, only check own namespace.' + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + probeSelector: + description: '*Experimental* Probes to be selected for target discovery.' 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + prometheusExternalLabelName: + description: Name of Prometheus external label used to denote Prometheus instance name. Defaults to the value of `prometheus`. External label will _not_ be added when value is set to empty string (`""`). + type: string + prometheusRulesExcludedFromEnforce: + description: PrometheusRulesExcludedFromEnforce - list of prometheus rules to be excluded from enforcing of adding namespace labels. Works only if enforcedNamespaceLabel set to true. Make sure both ruleNamespace and ruleName are set for each pair + items: + description: PrometheusRuleExcludeConfig enables users to configure excluded PrometheusRule names and their namespaces to be ignored while enforcing namespace label for alerts and metrics. 
+ properties: + ruleName: + description: RuleNamespace - name of excluded rule + type: string + ruleNamespace: + description: RuleNamespace - namespace of excluded rule + type: string + required: + - ruleName + - ruleNamespace + type: object + type: array + query: + description: QuerySpec defines the query command line flags when starting Prometheus. + properties: + lookbackDelta: + description: The delta difference allowed for retrieving metrics during expression evaluations. + type: string + maxConcurrency: + description: Number of concurrent queries that can be run at once. + format: int32 + type: integer + maxSamples: + description: Maximum number of samples a single query can load into memory. Note that queries will fail if they would load more samples than this into memory, so this also limits the number of samples a query can return. + format: int32 + type: integer + timeout: + description: Maximum time a query may take before being aborted. + type: string + type: object + queryLogFile: + description: QueryLogFile specifies the file to which PromQL queries are logged. Note that this location must be writable, and can be persisted using an attached volume. Alternatively, the location can be set to a stdout location such as `/dev/stdout` to log querie information to the default Prometheus log stream. This is only available in versions of Prometheus >= 2.16.0. For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/) + type: string + remoteRead: + description: If specified, the remote_read spec. This is an experimental feature, it may change in any upcoming release in a breaking way. + items: + description: RemoteReadSpec defines the remote_read configuration for prometheus. + properties: + basicAuth: + description: BasicAuth for the URL. + properties: + password: + description: The secret in the service monitor namespace that contains the password for authentication. 
+ properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerToken: + description: bearer token for remote read. + type: string + bearerTokenFile: + description: File to read bearer token for remote read. + type: string + name: + description: The name of the remote read queue, must be unique if specified. The name is used in metrics and logging in order to differentiate read configurations. Only valid in Prometheus versions 2.15.0 and newer. + type: string + proxyUrl: + description: Optional ProxyURL + type: string + readRecent: + description: Whether reads should be made for queries for time ranges that the local storage should have complete data for. + type: boolean + remoteTimeout: + description: Timeout for requests to the remote read endpoint. + type: string + requiredMatchers: + additionalProperties: + type: string + description: An optional list of equality matchers which have to be present in a selector to query the remote read endpoint. 
+ type: object + tlsConfig: + description: TLS Config to use for remote read. + properties: + ca: + description: Struct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + caFile: + description: Path to the CA cert in the Prometheus container to use for the targets. + type: string + cert: + description: Struct containing the client cert file for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. 
+ properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + url: + description: The URL of the endpoint to send samples to. + type: string + required: + - url + type: object + type: array + remoteWrite: + description: If specified, the remote_write spec. This is an experimental feature, it may change in any upcoming release in a breaking way. + items: + description: RemoteWriteSpec defines the remote_write configuration for prometheus. + properties: + basicAuth: + description: BasicAuth for the URL. 
+ properties: + password: + description: The secret in the service monitor namespace that contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerToken: + description: File to read bearer token for remote write. + type: string + bearerTokenFile: + description: File to read bearer token for remote write. + type: string + headers: + additionalProperties: + type: string + description: Custom HTTP headers to be sent along with each remote write request. Be aware that headers that are set by Prometheus itself can't be overwritten. Only valid in Prometheus versions 2.25.0 and newer. + type: object + name: + description: The name of the remote write queue, must be unique if specified. The name is used in metrics and logging in order to differentiate queues. Only valid in Prometheus versions 2.15.0 and newer. 
+ type: string + proxyUrl: + description: Optional ProxyURL + type: string + queueConfig: + description: QueueConfig allows tuning of the remote write queue parameters. + properties: + batchSendDeadline: + description: BatchSendDeadline is the maximum time a sample will wait in buffer. + type: string + capacity: + description: Capacity is the number of samples to buffer per shard before we start dropping them. + type: integer + maxBackoff: + description: MaxBackoff is the maximum retry delay. + type: string + maxRetries: + description: MaxRetries is the maximum number of times to retry a batch on recoverable errors. + type: integer + maxSamplesPerSend: + description: MaxSamplesPerSend is the maximum number of samples per send. + type: integer + maxShards: + description: MaxShards is the maximum number of shards, i.e. amount of concurrency. + type: integer + minBackoff: + description: MinBackoff is the initial retry delay. Gets doubled for every retry. + type: string + minShards: + description: MinShards is the minimum number of shards, i.e. amount of concurrency. + type: integer + type: object + remoteTimeout: + description: Timeout for requests to the remote write endpoint. + type: string + tlsConfig: + description: TLS Config to use for remote write. + properties: + ca: + description: Struct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. 
+ properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + caFile: + description: Path to the CA cert in the Prometheus container to use for the targets. + type: string + cert: + description: Struct containing the client cert file for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. 
+ type: boolean + keyFile: + description: Path to the client key file in the Prometheus container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + url: + description: The URL of the endpoint to send samples to. + type: string + writeRelabelConfigs: + description: The list of remote write relabel configurations. + items: + description: 'RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines ``-section of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source label values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted value is matched. Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing labels. 
Their content is concatenated using the configured separator and matched against the configured regular expression for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written in a replace action. It is mandatory for replace actions. Regex capture groups are available. + type: string + type: object + type: array + required: + - url + type: object + type: array + replicaExternalLabelName: + description: Name of Prometheus external label used to denote replica name. Defaults to the value of `prometheus_replica`. External label will _not_ be added when value is set to empty string (`""`). + type: string + replicas: + description: Number of replicas of each shard to deploy for a Prometheus deployment. Number of replicas multiplied by shards is the total number of Pods created. + format: int32 + type: integer + resources: + description: Define resources requests and limits for single Pods. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + retention: + description: Time duration Prometheus shall retain data for. Default is '24h', and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` (milliseconds seconds minutes hours days weeks years). + type: string + retentionSize: + description: 'Maximum amount of disk space used by blocks. Supported units: B, KB, MB, GB, TB, PB, EB. Ex: `512MB`.' + type: string + routePrefix: + description: The route prefix Prometheus registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true, but the server serves requests under a different route prefix. For example for use with `kubectl proxy`. + type: string + ruleNamespaceSelector: + description: Namespaces to be selected for PrometheusRules discovery. If unspecified, only the same namespace as the Prometheus object is in is used. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + ruleSelector: + description: A selector to select which PrometheusRules to mount for loading alerting/recording rules from. Until (excluding) Prometheus Operator v0.24.0 Prometheus Operator will migrate any legacy rule ConfigMaps to PrometheusRule custom resources selected by RuleSelector. Make sure it does not match any config maps that you do not want to be migrated. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + rules: + description: /--rules.*/ command-line arguments. + properties: + alert: + description: /--rules.alert.*/ command-line arguments + properties: + forGracePeriod: + description: Minimum duration between alert and restored 'for' state. This is maintained only for alerts with configured 'for' time greater than grace period. + type: string + forOutageTolerance: + description: Max time to tolerate prometheus outage for restoring 'for' state of alert. + type: string + resendDelay: + description: Minimum amount of time to wait before resending an alert to Alertmanager. + type: string + type: object + type: object + scrapeInterval: + description: Interval between consecutive scrapes. + type: string + scrapeTimeout: + description: Number of seconds to wait for target to respond before erroring. + type: string + secrets: + description: Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. The Secrets are mounted into /etc/prometheus/secrets/. + items: + type: string + type: array + securityContext: + description: SecurityContext holds pod-level security attributes and common container settings. This defaults to the default PodSecurityContext. + properties: + fsGroup: + description: "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: \n 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- \n If unset, the Kubelet will not modify the ownership and permissions of any volume." 
+ format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always".' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. 
+ properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + supplementalGroups: + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. 
May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccountName: + description: ServiceAccountName is the name of the ServiceAccount to use to run the Prometheus Pods. + type: string + serviceMonitorNamespaceSelector: + description: Namespace's labels to match for ServiceMonitor discovery. If nil, only check own namespace. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + serviceMonitorSelector: + description: ServiceMonitors to be selected for target discovery. *Deprecated:* if neither this nor podMonitorSelector are specified, configuration is unmanaged. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + sha: + description: 'SHA of Prometheus container image to be deployed. Defaults to the value of `version`. Similar to a tag, but the SHA explicitly deploys an immutable container image. Version and Tag are ignored if SHA is set. Deprecated: use ''image'' instead. The image digest can be specified as part of the image URL.' + type: string + shards: + description: 'EXPERIMENTAL: Number of shards to distribute targets onto. Number of replicas multiplied by shards is the total number of Pods created. Note that scaling down shards will not reshard data onto remaining instances, it must be manually moved. Increasing shards will not reshard data either but it will continue to be available from the same instances. 
To query globally use Thanos sidecar and Thanos querier or remote write data to a central location. Sharding is done on the content of the `__address__` target meta-label.' + format: int32 + type: integer + storage: + description: Storage spec to specify how storage shall be used. + properties: + disableMountSubPath: + description: 'Deprecated: subPath usage will be disabled by default in a future release, this option will become unnecessary. DisableMountSubPath allows to remove any subPath usage in volume mounts.' + type: boolean + emptyDir: + description: 'EmptyDirVolumeSource to be used by the Prometheus StatefulSets. If specified, used in place of any volumeClaimTemplate. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' + properties: + medium: + description: 'What type of storage medium should back this directory. The default is "" which means to use the node''s default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + volumeClaimTemplate: + description: A PVC spec to be used by the Prometheus StatefulSets. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: EmbeddedMetadata contains metadata relevant to an EmbeddedResource. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + type: object + spec: + description: 'Spec defines the desired characteristics of a volume requested by a pod author. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) * An existing PVC (PersistentVolumeClaim) * An existing custom resource/object that implements data population (Alpha) In order to use VolumeSnapshot object types, the appropriate feature gate must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the specified data source is not supported, the volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + status: + description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + capacity: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Represents the actual resources of the underlying volume. + type: object + conditions: + description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. 
+ items: + description: PersistentVolumeClaimCondition contails details about state of pvc + properties: + lastProbeTime: + description: Last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + format: date-time + type: string + message: + description: Human-readable message indicating details about last transition. + type: string + reason: + description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. + type: string + status: + type: string + type: + description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type + type: string + required: + - status + - type + type: object + type: array + phase: + description: Phase represents the current phase of PersistentVolumeClaim. + type: string + type: object + type: object + type: object + tag: + description: 'Tag of Prometheus container image to be deployed. Defaults to the value of `version`. Version is ignored if Tag is set. Deprecated: use ''image'' instead. The image tag can be specified as part of the image URL.' + type: string + thanos: + description: "Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment. \n This section is experimental, it may change significantly without deprecation notice in any release. \n This is experimental and may change significantly without backward compatibility in any release." + properties: + baseImage: + description: 'Thanos base image if other than default. Deprecated: use ''image'' instead' + type: string + grpcServerTlsConfig: + description: 'GRPCServerTLSConfig configures the gRPC server from which Thanos Querier reads recorded rule data. Note: Currently only the CAFile, CertFile, and KeyFile fields are supported. 
Maps to the ''--grpc-server-tls-*'' CLI args.' + properties: + ca: + description: Struct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + caFile: + description: Path to the CA cert in the Prometheus container to use for the targets. + type: string + cert: + description: Struct containing the client cert file for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. 
+ properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + image: + description: Image if specified has precedence over baseImage, tag and sha combinations. Specifying the version is still necessary to ensure the Prometheus Operator knows what version of Thanos is being configured. + type: string + listenLocal: + description: ListenLocal makes the Thanos sidecar listen on loopback, so that it does not bind against the Pod IP. + type: boolean + logFormat: + description: LogFormat for Thanos sidecar to be configured with. + type: string + logLevel: + description: LogLevel for Thanos sidecar to be configured with. 
+ type: string + minTime: + description: MinTime for Thanos sidecar to be configured with. Option can be a constant time in RFC3339 format or time duration relative to current time, such as -1d or 2h45m. Valid duration units are ms, s, m, h, d, w, y. + type: string + objectStorageConfig: + description: ObjectStorageConfig configures object storage in Thanos. Alternative to ObjectStorageConfigFile, and lower order priority. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + objectStorageConfigFile: + description: ObjectStorageConfigFile specifies the path of the object storage configuration file. When used alongside with ObjectStorageConfig, ObjectStorageConfigFile takes precedence. + type: string + resources: + description: Resources defines the resource requirements for the Thanos sidecar. If not provided, no requests/limits will be set + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + sha: + description: 'SHA of Thanos container image to be deployed. Defaults to the value of `version`. Similar to a tag, but the SHA explicitly deploys an immutable container image. Version and Tag are ignored if SHA is set. Deprecated: use ''image'' instead. The image digest can be specified as part of the image URL.' + type: string + tag: + description: 'Tag of Thanos sidecar container image to be deployed. Defaults to the value of `version`. Version is ignored if Tag is set. Deprecated: use ''image'' instead. The image tag can be specified as part of the image URL.' + type: string + tracingConfig: + description: TracingConfig configures tracing in Thanos. This is an experimental feature, it may change in any upcoming release in a breaking way. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + tracingConfigFile: + description: TracingConfig specifies the path of the tracing configuration file. When used alongside with TracingConfig, TracingConfigFile takes precedence. + type: string + version: + description: Version describes the version of Thanos to use. + type: string + type: object + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. 
+ type: string + type: object + type: array + topologySpreadConstraints: + description: If specified, the pod's topology spread constraints. + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. It''s the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. 
For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It''s considered as "Unsatisfiable" if and only if placing incoming pod on any topology violates "MaxSkew". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + version: + description: Version of Prometheus to be deployed. + type: string + volumeMounts: + description: VolumeMounts allows configuration of additional VolumeMounts on the output StatefulSet definition. 
VolumeMounts specified will be appended to other VolumeMounts in the prometheus container, that are generated as a result of StorageSpec objects. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + volumes: + description: Volumes allows configuration of additional volumes on the output StatefulSet definition. Volumes specified will be appended to other volumes that are generated as a result of StorageSpec objects. + items: + description: Volume represents a named volume in a pod that may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).' + format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". If omitted, the default is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ type: string + kind: + description: 'Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: the name of secret that contains Azure Storage Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + properties: + monitors: + description: 'Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'Optional: Used as the mounted root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication secret for User, default is empty. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. 
Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + csi: + description: CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature). + properties: + driver: + description: Driver is the name of the CSI driver that handles this volume. 
Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: Specifies a read-only configuration for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' 
+ format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this directory. The default is "" which means to use the node''s default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + fc: + description: FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. 
"ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use for this volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. 
Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. 
+ format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the NFS server. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, and downward API + properties: + defaultMode: + description: Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with other supported volume types + properties: + configMap: + description: information about the configMap data to project + properties: + items: + description: If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. 
+ type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to project + properties: + items: + description: If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken data to project + properties: + audience: + description: Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the mount point of the file to project the token into. + type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is no group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. 
+ type: boolean + registry: + description: Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to serivceaccount user + type: string + volume: + description: Volume is a string that references an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'The rados pool name. Default is rbd. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + type: object + sslEnabled: + description: Flag to enable/disable SSL communication with Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with the protection domain. + type: string + system: + description: The name of the storage system as configured in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. 
+ type: string + mode: + description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must be defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the volume within StorageOS. 
If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + walCompression: + description: Enable compression of the write-ahead log using Snappy. This flag is only available in versions of Prometheus >= 2.11.0. + type: boolean + web: + description: WebSpec defines the web command line flags when starting Prometheus. + properties: + pageTitle: + description: The prometheus web page title + type: string + type: object + type: object + status: + description: 'Most recent observed status of the Prometheus cluster. Read-only. Not included when requesting from the apiserver, only from the Prometheus Operator API itself. 
More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + availableReplicas: + description: Total number of available pods (ready for at least minReadySeconds) targeted by this Prometheus deployment. + format: int32 + type: integer + paused: + description: Represents whether any actions on the underlying managed objects are being performed. Only delete actions will be performed. + type: boolean + replicas: + description: Total number of non-terminated pods targeted by this Prometheus deployment (their labels match the selector). + format: int32 + type: integer + unavailableReplicas: + description: Total number of unavailable pods targeted by this Prometheus deployment. + format: int32 + type: integer + updatedReplicas: + description: Total number of non-terminated pods targeted by this Prometheus deployment that have the desired version spec. + format: int32 + type: integer + required: + - availableReplicas + - paused + - replicas + - unavailableReplicas + - updatedReplicas + type: object + required: + - spec + type: object + served: true + storage: true + subresources: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-prometheusrules.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-prometheusrules.yaml new file mode 100755 index 000000000..07a24df45 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-prometheusrules.yaml @@ -0,0 +1,90 @@ +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + creationTimestamp: null + 
name: prometheusrules.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + kind: PrometheusRule + listKind: PrometheusRuleList + plural: prometheusrules + singular: prometheusrule + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: PrometheusRule defines recording and alerting rules for a Prometheus instance + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired alerting rule definitions for Prometheus. + properties: + groups: + description: Content of Prometheus rule file + items: + description: 'RuleGroup is a list of sequentially evaluated recording and alerting rules. Note: PartialResponseStrategy is only used by ThanosRuler and will be ignored by Prometheus instances. Valid values for this field are ''warn'' or ''abort''. More info: https://github.com/thanos-io/thanos/blob/master/docs/components/rule.md#partial-response' + properties: + interval: + type: string + name: + type: string + partial_response_strategy: + type: string + rules: + items: + description: Rule describes an alerting or recording rule. 
+ properties: + alert: + type: string + annotations: + additionalProperties: + type: string + type: object + expr: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + for: + type: string + labels: + additionalProperties: + type: string + type: object + record: + type: string + required: + - expr + type: object + type: array + required: + - name + - rules + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-servicemonitors.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-servicemonitors.yaml new file mode 100755 index 000000000..9dee64ff9 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-servicemonitors.yaml @@ -0,0 +1,375 @@ +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + creationTimestamp: null + name: servicemonitors.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: ServiceMonitor + listKind: ServiceMonitorList + plural: servicemonitors + singular: servicemonitor + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ServiceMonitor defines monitoring for a set of services. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of desired Service selection for target discovery by Prometheus. + properties: + endpoints: + description: A list of endpoints allowed as part of this ServiceMonitor. + items: + description: Endpoint defines a scrapeable endpoint serving Prometheus metrics. + properties: + basicAuth: + description: 'BasicAuth allow an endpoint to authenticate over basic authentication More info: https://prometheus.io/docs/operating/configuration/#endpoints' + properties: + password: + description: The secret in the service monitor namespace that contains the password for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + username: + description: The secret in the service monitor namespace that contains the username for authentication. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + bearerTokenFile: + description: File to read bearer token for scraping targets. + type: string + bearerTokenSecret: + description: Secret to mount to read bearer token for scraping targets. The secret needs to be in the same namespace as the service monitor and accessible by the Prometheus Operator. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + honorLabels: + description: HonorLabels chooses the metric's labels on collisions with target labels. + type: boolean + honorTimestamps: + description: HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data. + type: boolean + interval: + description: Interval at which metrics should be scraped + type: string + metricRelabelings: + description: MetricRelabelConfigs to apply to samples before ingestion. + items: + description: 'RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines ``-section of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source label values. + format: int64 + type: integer + regex: + description: Regular expression against which the extracted value is matched. 
Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written in a replace action. It is mandatory for replace actions. Regex capture groups are available. + type: string + type: object + type: array + params: + additionalProperties: + items: + type: string + type: array + description: Optional HTTP URL parameters + type: object + path: + description: HTTP path to scrape for metrics. + type: string + port: + description: Name of the service port this endpoint refers to. Mutually exclusive with targetPort. + type: string + proxyUrl: + description: ProxyURL eg http://proxyserver:2195 Directs scrapes to proxy through this endpoint. + type: string + relabelings: + description: 'RelabelConfigs to apply to samples before scraping. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + items: + description: 'RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines ``-section of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs' + properties: + action: + description: Action to perform based on regex matching. Default is 'replace' + type: string + modulus: + description: Modulus to take of the hash of the source label values. 
+ format: int64 + type: integer + regex: + description: Regular expression against which the extracted value is matched. Default is '(.*)' + type: string + replacement: + description: Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1' + type: string + separator: + description: Separator placed between concatenated source label values. default is ';'. + type: string + sourceLabels: + description: The source labels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the replace, keep, and drop actions. + items: + type: string + type: array + targetLabel: + description: Label to which the resulting value is written in a replace action. It is mandatory for replace actions. Regex capture groups are available. + type: string + type: object + type: array + scheme: + description: HTTP scheme to use for scraping. + type: string + scrapeTimeout: + description: Timeout after which the scrape is ended + type: string + targetPort: + anyOf: + - type: integer + - type: string + description: Name or number of the target port of the Pod behind the Service, the port must be specified with container port property. Mutually exclusive with port. + x-kubernetes-int-or-string: true + tlsConfig: + description: TLS configuration to use when scraping the endpoint + properties: + ca: + description: Struct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + caFile: + description: Path to the CA cert in the Prometheus container to use for the targets. + type: string + cert: + description: Struct containing the client cert file for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + type: object + type: array + jobLabel: + description: The label to use to retrieve the job name from. + type: string + namespaceSelector: + description: Selector to select which namespaces the Endpoints objects are discovered from. + properties: + any: + description: Boolean describing whether all namespaces are selected in contrast to a list restricting them. + type: boolean + matchNames: + description: List of namespace names. + items: + type: string + type: array + type: object + podTargetLabels: + description: PodTargetLabels transfers labels on the Kubernetes Pod onto the target. + items: + type: string + type: array + sampleLimit: + description: SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + format: int64 + type: integer + selector: + description: Selector to select Endpoints objects. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + targetLabels: + description: TargetLabels transfers labels on the Kubernetes Service onto the target. + items: + type: string + type: array + targetLimit: + description: TargetLimit defines a limit on the number of scraped targets that will be accepted. 
+ format: int64 + type: integer + required: + - endpoints + - selector + type: object + required: + - spec + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-thanosrulers.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-thanosrulers.yaml new file mode 100755 index 000000000..a470d4b9f --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/crd-manifest/crd-thanosrulers.yaml @@ -0,0 +1,3342 @@ +# https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml + +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.4.1 + creationTimestamp: null + name: thanosrulers.monitoring.coreos.com +spec: + group: monitoring.coreos.com + names: + categories: + - prometheus-operator + kind: ThanosRuler + listKind: ThanosRulerList + plural: thanosrulers + singular: thanosruler + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ThanosRuler defines a ThanosRuler deployment. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: 'Specification of the desired behavior of the ThanosRuler cluster. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + affinity: + description: If specified, the pod's scheduling constraints. + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. 
If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
+ items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. 
+ properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + alertDropLabels: + description: AlertDropLabels configure the label names which should be dropped in ThanosRuler alerts. If `labels` field is not provided, `thanos_ruler_replica` will be dropped in alerts by default. + items: + type: string + type: array + alertQueryUrl: + description: The external Query URL the Thanos Ruler will set in the 'Source' field of all alerts. Maps to the '--alert.query-url' CLI arg. + type: string + alertmanagersConfig: + description: Define configuration for connecting to alertmanager. Only available with thanos v0.10.0 and higher. Maps to the `alertmanagers.config` arg. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + alertmanagersUrl: + description: 'Define URLs to send alerts to Alertmanager. For Thanos v0.10.0 and higher, AlertManagersConfig should be used instead. Note: this field will be ignored if AlertManagersConfig is specified. Maps to the `alertmanagers.url` arg.' + items: + type: string + type: array + containers: + description: 'Containers allows injecting additional containers or modifying operator generated containers. This can be used to allow adding an authentication proxy to a ThanosRuler pod or to change the behavior of an operator generated container. Containers described here modify an operator generated container if they share the same name and modifications are done via a strategic merge patch. The current container names are: `thanos-ruler` and `config-reloader`. 
Overriding containers is entirely outside the scope of what the maintainers will support and by doing so, you accept that this behaviour may break at any time without notice.' + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The docker image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. + items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. 
+ type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' 
+ properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. + items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod''s termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. 
+ items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. 
Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. + properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. 
If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. 
HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. 
This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. 
Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. 
+ type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. 
+ x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. 
FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. 
Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + enforcedNamespaceLabel: + description: EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert and metric that is user created. The label value will always be the namespace of the object that is being created. + type: string + evaluationInterval: + description: Interval between consecutive evaluations. + type: string + externalPrefix: + description: The external URL the Thanos Ruler instances will be available under. This is necessary to generate correct URLs. This is necessary if Thanos Ruler is not served from root of a DNS name. + type: string + grpcServerTlsConfig: + description: 'GRPCServerTLSConfig configures the gRPC server from which Thanos Querier reads recorded rule data. Note: Currently only the CAFile, CertFile, and KeyFile fields are supported. Maps to the ''--grpc-server-tls-*'' CLI args.' + properties: + ca: + description: Struct containing the CA cert to use for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + caFile: + description: Path to the CA cert in the Prometheus container to use for the targets. + type: string + cert: + description: Struct containing the client cert file for the targets. + properties: + configMap: + description: ConfigMap containing data to use for the targets. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + secret: + description: Secret containing data to use for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' 
+ type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + certFile: + description: Path to the client cert file in the Prometheus container for the targets. + type: string + insecureSkipVerify: + description: Disable target certificate validation. + type: boolean + keyFile: + description: Path to the client key file in the Prometheus container for the targets. + type: string + keySecret: + description: Secret containing the client key file for the targets. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + serverName: + description: Used to verify the hostname for the targets. + type: string + type: object + image: + description: Thanos container image URL. + type: string + imagePullSecrets: + description: An optional list of references to secrets in the same namespace to use for pulling thanos images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod + items: + description: LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + type: array + initContainers: + description: 'InitContainers allows adding initContainers to the pod definition. Those can be used to e.g. 
fetch secrets for injection into the ThanosRuler configuration from external sources. Any errors during the execution of an initContainer will lead to a restart of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ Using initContainers for any use case other then secret fetching is entirely outside the scope of what the maintainers will support and by doing so, you accept that this behaviour may break at any time without notice.' + items: + description: A single application container that you want to run within a pod. + properties: + args: + description: 'Arguments to the entrypoint. The docker image''s CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + command: + description: 'Entrypoint array. Not executed within a shell. The docker image''s ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container''s environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + items: + type: string + type: array + env: + description: List of environment variables to set in the container. Cannot be updated. 
+ items: + description: EnvVar represents an environment variable present in a Container. + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. 
+ type: string + required: + - fieldPath + type: object + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + type: object + required: + - name + type: object + type: array + envFrom: + description: List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated. 
+ items: + description: EnvFromSource represents the source of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + type: object + prefix: + description: An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + type: object + type: object + type: array + image: + description: 'Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management system should take in response to container lifecycle events. Cannot be updated. + properties: + postStart: + description: 'PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. 
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. 
TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + preStop: + description: 'PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod''s termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod''s termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + httpGet: + description: HTTPGet specifies the http request to perform. 
+ properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + type: object + type: object + livenessProbe: + description: 'Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. 
+ properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + name: + description: Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated. + type: string + ports: + description: List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. + items: + description: ContainerPort represents a network port in a single container. 
+ properties: + containerPort: + description: Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536. + format: int32 + type: integer + hostIP: + description: What host IP to bind the external port to. + type: string + hostPort: + description: Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this. + format: int32 + type: integer + name: + description: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services. + type: string + protocol: + default: TCP + description: Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP". + type: string + required: + - containerPort + type: object + type: array + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + resources: + description: 'Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + securityContext: + description: 'Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. + properties: + add: + description: Added capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + drop: + description: Removed capabilities + items: + description: Capability represent POSIX capabilities type + type: string + type: array + type: object + privileged: + description: Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. + type: boolean + procMount: + description: procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container has a read-only root filesystem. Default is false. + type: boolean + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
+ properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + startupProbe: + description: 'StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod''s lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + properties: + exec: + description: One and only one of the following should be specified. Exec specifies the action to take. + properties: + command: + description: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy. 
+ items: + type: string + type: array + type: object + failureThreshold: + description: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + format: int32 + type: integer + httpGet: + description: HTTPGet specifies the http request to perform. + properties: + host: + description: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers to set in the request. HTTP allows repeated headers. + items: + description: HTTPHeader describes a custom header to be used in HTTP probes + properties: + name: + description: The header field name + type: string + value: + description: The header field value + type: string + required: + - name + - value + type: object + type: array + path: + description: Path to access on the HTTP server. + type: string + port: + anyOf: + - type: integer + - type: string + description: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for connecting to the host. Defaults to HTTP. + type: string + required: + - port + type: object + initialDelaySeconds: + description: 'Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + periodSeconds: + description: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. + format: int32 + type: integer + successThreshold: + description: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. 
+ format: int32 + type: integer + tcpSocket: + description: 'TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook' + properties: + host: + description: 'Optional: Host name to connect to, defaults to the pod IP.' + type: string + port: + anyOf: + - type: integer + - type: string + description: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + x-kubernetes-int-or-string: true + required: + - port + type: object + timeoutSeconds: + description: 'Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + format: int32 + type: integer + type: object + stdin: + description: Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false + type: boolean + terminationMessagePath: + description: 'Optional: Path at which the file to which the container''s termination message will be written is mounted into the container''s filesystem. Message written is intended to be brief final status, such as an assertion failure message. 
Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated. + type: string + tty: + description: Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list of block devices to be used by the container. + items: + description: volumeDevice describes a mapping of a raw block device within a container. + properties: + devicePath: + description: devicePath is the path inside of the container that the device will be mapped to. + type: string + name: + description: name must match the name of a persistentVolumeClaim in the pod + type: string + required: + - devicePath + - name + type: object + type: array + volumeMounts: + description: Pod volumes to mount into the container's filesystem. Cannot be updated. + items: + description: VolumeMount describes a mounting of a Volume within a container. + properties: + mountPath: + description: Path within the container at which the volume should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10. + type: string + name: + description: This must match the Name of a Volume. 
+ type: string + readOnly: + description: Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to "" (volume's root). SubPathExpr and SubPath are mutually exclusive. + type: string + required: + - mountPath + - name + type: object + type: array + workingDir: + description: Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated. + type: string + required: + - name + type: object + type: array + labels: + additionalProperties: + type: string + description: Labels configure the external label pairs to ThanosRuler. If not provided, default replica label `thanos_ruler_replica` will be added as a label and be dropped in alerts. + type: object + listenLocal: + description: ListenLocal makes the Thanos ruler listen on loopback, so that it does not bind against the Pod IP. + type: boolean + logFormat: + description: Log format for ThanosRuler to be configured with. + type: string + logLevel: + description: Log level for ThanosRuler to be configured with. + type: string + nodeSelector: + additionalProperties: + type: string + description: Define which Nodes the Pods are scheduled on. + type: object + objectStorageConfig: + description: ObjectStorageConfig configures object storage in Thanos. Alternative to ObjectStorageConfigFile, and lower order priority. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + objectStorageConfigFile: + description: ObjectStorageConfigFile specifies the path of the object storage configuration file. When used alongside with ObjectStorageConfig, ObjectStorageConfigFile takes precedence. + type: string + paused: + description: When a ThanosRuler deployment is paused, no actions except for deletion will be performed on the underlying objects. + type: boolean + podMetadata: + description: PodMetadata contains Labels and Annotations gets propagated to the thanos ruler pods. + properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + type: object + portName: + description: Port name used for the pods and governing service. 
This defaults to web + type: string + priorityClassName: + description: Priority class assigned to the Pods + type: string + prometheusRulesExcludedFromEnforce: + description: PrometheusRulesExcludedFromEnforce - list of Prometheus rules to be excluded from enforcing of adding namespace labels. Works only if enforcedNamespaceLabel set to true. Make sure both ruleNamespace and ruleName are set for each pair + items: + description: PrometheusRuleExcludeConfig enables users to configure excluded PrometheusRule names and their namespaces to be ignored while enforcing namespace label for alerts and metrics. + properties: + ruleName: + description: RuleNamespace - name of excluded rule + type: string + ruleNamespace: + description: RuleNamespace - namespace of excluded rule + type: string + required: + - ruleName + - ruleNamespace + type: object + type: array + queryConfig: + description: Define configuration for connecting to thanos query instances. If this is defined, the QueryEndpoints field will be ignored. Maps to the `query.config` CLI argument. Only available with thanos v0.11.0 and higher. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + queryEndpoints: + description: QueryEndpoints defines Thanos querier endpoints from which to query metrics. Maps to the --query flag of thanos ruler. + items: + type: string + type: array + replicas: + description: Number of thanos ruler instances to deploy. + format: int32 + type: integer + resources: + description: Resources defines the resource requirements for single Pods. 
If not provided, no requests/limits will be set + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + retention: + description: Time duration ThanosRuler shall retain data for. Default is '24h', and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` (milliseconds seconds minutes hours days weeks years). + type: string + routePrefix: + description: The route prefix ThanosRuler registers HTTP handlers for. This allows thanos UI to be served on a sub-path. + type: string + ruleNamespaceSelector: + description: Namespaces to be selected for Rules discovery. If unspecified, only the same namespace as the ThanosRuler object is in is used. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + ruleSelector: + description: A label selector to select which PrometheusRules to mount for alerting and recording. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + securityContext: + description: SecurityContext holds pod-level security attributes and common container settings. This defaults to the default PodSecurityContext. + properties: + fsGroup: + description: "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: \n 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- \n If unset, the Kubelet will not modify the ownership and permissions of any volume." + format: int64 + type: integer + fsGroupChangePolicy: + description: 'fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always".' + type: string + runAsGroup: + description: The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root user. 
If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: object + supplementalGroups: + description: A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container. + items: + format: int64 + type: integer + type: array + sysctls: + description: Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. 
+ items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + windowsOptions: + description: The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the GMSA credential spec to use. + type: string + runAsUserName: + description: The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + type: string + type: object + type: object + serviceAccountName: + description: ServiceAccountName is the name of the ServiceAccount to use to run the Thanos Ruler Pods. + type: string + storage: + description: Storage spec to specify how storage shall be used. + properties: + disableMountSubPath: + description: 'Deprecated: subPath usage will be disabled by default in a future release, this option will become unnecessary. DisableMountSubPath allows to remove any subPath usage in volume mounts.' + type: boolean + emptyDir: + description: 'EmptyDirVolumeSource to be used by the Prometheus StatefulSets. If specified, used in place of any volumeClaimTemplate. 
More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir' + properties: + medium: + description: 'What type of storage medium should back this directory. The default is "" which means to use the node''s default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + volumeClaimTemplate: + description: A PVC spec to be used by the Prometheus StatefulSets. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: EmbeddedMetadata contains metadata relevant to an EmbeddedResource. 
+ properties: + annotations: + additionalProperties: + type: string + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + labels: + additionalProperties: + type: string + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + type: object + spec: + description: 'Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the desired access modes the volume should have. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + dataSource: + description: 'This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) * An existing PVC (PersistentVolumeClaim) * An existing custom resource/object that implements data population (Alpha) In order to use VolumeSnapshot object types, the appropriate feature gate must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the specified data source is not supported, the volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.' + properties: + apiGroup: + description: APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource being referenced + type: string + name: + description: Name is the name of resource being referenced + type: string + required: + - kind + - name + type: object + resources: + description: 'Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources' + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + type: object + selector: + description: A label query over volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". 
The requirements are ANDed. + type: object + type: object + storageClassName: + description: 'Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1' + type: string + volumeMode: + description: volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: VolumeName is the binding reference to the PersistentVolume backing this claim. + type: string + type: object + status: + description: 'Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + accessModes: + description: 'AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1' + items: + type: string + type: array + capacity: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Represents the actual resources of the underlying volume. + type: object + conditions: + description: Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. + items: + description: PersistentVolumeClaimCondition contails details about state of pvc + properties: + lastProbeTime: + description: Last time we probed the condition. + format: date-time + type: string + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + format: date-time + type: string + message: + description: Human-readable message indicating details about last transition. 
+ type: string + reason: + description: Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports "ResizeStarted" that means the underlying persistent volume is being resized. + type: string + status: + type: string + type: + description: PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type + type: string + required: + - status + - type + type: object + type: array + phase: + description: Phase represents the current phase of PersistentVolumeClaim. + type: string + type: object + type: object + type: object + tolerations: + description: If specified, the pod's tolerations. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. 
If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: If specified, the pod's topology spread constraints. + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. 
It''s the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: | zone1 | zone2 | zone3 | | P | P | | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. It's a required field. + type: string + whenUnsatisfiable: + description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It''s considered as "Unsatisfiable" if and only if placing incoming pod on any topology violates "MaxSkew". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.' + type: string + required: + - maxSkew + - topologyKey + - whenUnsatisfiable + type: object + type: array + tracingConfig: + description: TracingConfig configures tracing in Thanos. 
This is an experimental feature, it may change in any upcoming release in a breaking way. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + volumes: + description: Volumes allows configuration of additional volumes on the output StatefulSet definition. Volumes specified will be appended to other volumes that are generated as a result of StorageSpec objects. + items: + description: Volume represents a named volume in a pod that may be accessed by any container in the pod. + properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + properties: + fsType: + description: 'Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).' 
+ format: int32 + type: integer + readOnly: + description: 'Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". If omitted, the default is "false". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + required: + - volumeID + type: object + azureDisk: + description: AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretName: + description: the name of secret that contains Azure Storage Account Name and Key + type: string + shareName: + description: Share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + properties: + monitors: + description: 'Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + items: + type: string + type: array + path: + description: 'Optional: Used as the mounted root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + required: + - monitors + type: object + cinder: + description: 'Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + properties: + fsType: + description: 'Filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing parameters used to connect to OpenStack.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + volumeID: + description: 'volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + required: + - volumeID + type: object + configMap: + description: ConfigMap represents a configMap that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. 
+ items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + csi: + description: CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature). + properties: + driver: + description: Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster. + type: string + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed. 
+ properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + readOnly: + description: Specifies a read-only configuration for the volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + additionalProperties: + type: string + description: VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values. + type: object + required: + - driver + type: object + downwardAPI: + description: DownwardAPI represents downward API about the pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + emptyDir: + description: 'EmptyDir represents a temporary directory that shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + properties: + medium: + description: 'What type of storage medium should back this directory. The default is "" which means to use the node''s default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: 'Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. 
The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + fc: + description: FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + lun: + description: 'Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + items: + type: string + type: array + wwids: + description: 'Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.' + items: + type: string + type: array + type: object + flexVolume: + description: FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin. + properties: + driver: + description: Driver is the name of the driver to use for this volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. 
+ type: string + options: + additionalProperties: + type: string + description: 'Optional: Extra command options if any.' + type: object + readOnly: + description: 'Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + required: + - driver + type: object + flocker: + description: Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + properties: + datasetName: + description: Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + properties: + fsType: + description: 'Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + format: int32 + type: integer + pdName: + description: 'Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + required: + - pdName + type: object + gitRepo: + description: 'GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod''s container.' + properties: + directory: + description: Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the host that shares a pod''s lifetime. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md' + properties: + endpoints: + description: 'EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: 'HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath --- TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not mount host directories as read/write.' + properties: + path: + description: 'Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + required: + - path + type: object + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that is attached to a kubelet''s host machine and then exposed to the pod. 
More info: https://examples.k8s.io/volumes/iscsi/README.md' + properties: + chapAuthDiscovery: + description: whether support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). + items: + type: string + type: array + readOnly: + description: ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator authentication + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + targetPortal: + description: iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260). 
+ type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + properties: + path: + description: 'Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". 
Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + properties: + fsType: + description: FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + required: + - volumeID + type: object + projected: + description: Items for all in one resources secrets, configmaps, and downward API + properties: + defaultMode: + description: Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: list of volume projections + items: + description: Projection that may be projected along with other supported volume types + properties: + configMap: + description: information about the configMap data to project + properties: + items: + description: If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys must be defined + type: boolean + type: object + downwardAPI: + description: information about the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume file + items: + description: DownwardAPIVolumeFile represents information to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified API version. + type: string + required: + - fieldPath + type: object + mode: + description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.' + properties: + containerName: + description: 'Container name: required for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + required: + - path + type: object + type: array + type: object + secret: + description: information about the secret data to project + properties: + items: + description: If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. 
+ type: string + mode: + description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + type: object + serviceAccountToken: + description: information about the serviceAccountToken data to project + properties: + audience: + description: Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes. + format: int64 + type: integer + path: + description: Path is the path relative to the mount point of the file to project the token into. 
+ type: string + required: + - path + type: object + type: object + type: array + required: + - sources + type: object + quobyte: + description: Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + properties: + group: + description: Group to map volume access to Default is no group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false. + type: boolean + registry: + description: Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to serivceaccount user + type: string + volume: + description: Volume is a string that references an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: 'RBD represents a Rados Block Device mount on the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + properties: + fsType: + description: 'Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd TODO: how do we prevent errors in the filesystem from compromising the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + items: + type: string + type: array + pool: + description: 'The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + user: + description: 'The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + required: + - image + - monitors + type: object + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Default is "xfs". + type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
+ type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + type: object + sslEnabled: + description: Flag to enable/disable SSL communication with Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with the protection domain. + type: string + system: + description: The name of the storage system as configured in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the ScaleIO system that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: 'Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + properties: + defaultMode: + description: 'Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + items: + description: If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within a volume. + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.' + format: int32 + type: integer + path: + description: The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + optional: + description: Specify whether the Secret or its keys must be defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + type: object + storageos: + description: StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted. + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. 
apiVersion, kind, uid?' + type: string + type: object + volumeName: + description: VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to "default" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + status: + description: 'Most recent observed status of the ThanosRuler cluster. Read-only. Not included when requesting from the apiserver, only from the ThanosRuler Operator API itself. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + properties: + availableReplicas: + description: Total number of available pods (ready for at least minReadySeconds) targeted by this ThanosRuler deployment. 
+ format: int32 + type: integer + paused: + description: Represents whether any actions on the underlying managed objects are being performed. Only delete actions will be performed. + type: boolean + replicas: + description: Total number of non-terminated pods targeted by this ThanosRuler deployment (their labels match the selector). + format: int32 + type: integer + unavailableReplicas: + description: Total number of unavailable pods targeted by this ThanosRuler deployment. + format: int32 + type: integer + updatedReplicas: + description: Total number of non-terminated pods targeted by this ThanosRuler deployment that have the desired version spec. + format: int32 + type: integer + required: + - availableReplicas + - paused + - replicas + - unavailableReplicas + - updatedReplicas + type: object + required: + - spec + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/templates/_helpers.tpl new file mode 100755 index 000000000..2da79e70f --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/templates/_helpers.tpl @@ -0,0 +1,29 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- 
else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/templates/jobs.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/templates/jobs.yaml new file mode 100755 index 000000000..210e4ab3a --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/templates/jobs.yaml @@ -0,0 +1,117 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Chart.Name }}-create + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Chart.Name }} + annotations: + "helm.sh/hook": post-install, post-upgrade, post-rollback + "helm.sh/hook-delete-policy": before-hook-creation, hook-succeeded, hook-failed +spec: + template: + metadata: + name: {{ .Chart.Name }}-create + labels: + app: {{ .Chart.Name }} + spec: + serviceAccountName: {{ .Chart.Name }}-manager + securityContext: + runAsNonRoot: true + runAsUser: 1000 + initContainers: + - name: set-preserve-unknown-fields-false + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - > + {{- range $path, $_ := (.Files.Glob "crd-manifest/**.yaml") }} + {{- $crd := get (get ($.Files.Get $path | fromYaml) "metadata") "name" }} + if [[ -n "$(kubectl get crd {{ $crd }} -o jsonpath='{.spec.preserveUnknownFields}')" ]]; then + patch="{\"spec\": {\"preserveUnknownFields\": false}}" + if [[ -z "$(kubectl get crd {{ $crd }} -o jsonpath='{.spec.versions[0].schema}' 2>&1)" ]]; then + patch="{\"spec\": {\"preserveUnknownFields\": false, \"versions\": [{\"name\": \"v1\", \"served\": false, \"storage\": true}]}}" + fi + kubectl patch crd {{ $crd }} -p "${patch}" --type="merge"; + fi; + {{- end }} + containers: + - name: create-crds + image: {{ template "system_default_registry" . 
}}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - > + kubectl apply -f /etc/config/crd-manifest.yaml + volumeMounts: + - name: crd-manifest + readOnly: true + mountPath: /etc/config + restartPolicy: OnFailure + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} + volumes: + - name: crd-manifest + configMap: + name: {{ .Chart.Name }}-manifest +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Chart.Name }}-delete + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Chart.Name }} + annotations: + "helm.sh/hook": pre-delete + "helm.sh/hook-delete-policy": before-hook-creation, hook-succeeded, hook-failed +spec: + template: + metadata: + name: {{ .Chart.Name }}-delete + labels: + app: {{ .Chart.Name }} + spec: + serviceAccountName: {{ .Chart.Name }}-manager + securityContext: + runAsNonRoot: true + runAsUser: 1000 + initContainers: + - name: remove-finalizers + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - > + {{- range $path, $_ := (.Files.Glob "crd-manifest/**.yaml") }} + {{- $crd := get (get ($.Files.Get $path | fromYaml) "metadata") "name" }} + kubectl patch crd {{ $crd }} -p '{"metadata": {"finalizers": []}}' || true; + {{- end }} + volumeMounts: + - name: crd-manifest + readOnly: true + mountPath: /etc/config + containers: + - name: delete-crds + image: {{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }} + imagePullPolicy: IfNotPresent + command: + - /bin/sh + - -c + - > + kubectl delete -f /etc/config/crd-manifest.yaml + volumeMounts: + - name: crd-manifest + readOnly: true + mountPath: /etc/config + restartPolicy: OnFailure + nodeSelector: {{ include "linux-node-selector" . 
| nindent 8 }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} + volumes: + - name: crd-manifest + configMap: + name: {{ .Chart.Name }}-manifest diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/templates/manifest.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/templates/manifest.yaml new file mode 100755 index 000000000..31016b6ef --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/templates/manifest.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Chart.Name }}-manifest + namespace: {{ .Release.Namespace }} +data: + crd-manifest.yaml: | + {{- $currentScope := . -}} + {{- $crds := (.Files.Glob "crd-manifest/**.yaml") -}} + {{- range $path, $_ := $crds -}} + {{- with $currentScope -}} + {{ .Files.Get $path | nindent 4 }} + --- + {{- end -}}{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/templates/rbac.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/templates/rbac.yaml new file mode 100755 index 000000000..bdda1ddad --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/templates/rbac.yaml @@ -0,0 +1,72 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Chart.Name }}-manager + labels: + app: {{ .Chart.Name }}-manager +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: ['create', 'get', 'patch', 'delete'] +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ .Chart.Name }}-manager +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Chart.Name }}-manager + labels: + app: {{ .Chart.Name }}-manager +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Chart.Name }}-manager +subjects: +- kind: ServiceAccount + name: {{ .Chart.Name }}-manager + namespace: {{ .Release.Namespace }} +--- 
+apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Chart.Name }}-manager + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Chart.Name }}-manager +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ .Chart.Name }}-manager + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Chart.Name }}-manager +spec: + privileged: false + allowPrivilegeEscalation: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'configMap' + - 'secret' diff --git a/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/values.yaml b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/values.yaml new file mode 100755 index 000000000..8f3ebe37a --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring-crd/14.5.100/values.yaml @@ -0,0 +1,11 @@ +# Default values for rancher-monitoring-crd. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +global: + cattle: + systemDefaultRegistry: "" + +image: + repository: rancher/rancher-agent + tag: v2.5.7 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/.helmignore b/charts/rancher-monitoring/rancher-monitoring/14.5.100/.helmignore new file mode 100755 index 000000000..93bf1ec02 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/.helmignore @@ -0,0 +1,26 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +# helm/charts +OWNERS +hack/ +ci/ +kube-prometheus-*.tgz diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/CHANGELOG.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/CHANGELOG.md new file mode 100755 index 000000000..8178169b9 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/CHANGELOG.md @@ -0,0 +1,47 @@ +# Changelog +All notable changes from the upstream Prometheus Operator chart will be added to this file. + +## [Package Version 00] - 2020-07-19 +### Added +- Added [Prometheus Adapter](https://github.com/helm/charts/tree/master/stable/prometheus-adapter) as a dependency to the upstream Prometheus Operator chart to allow users to expose custom metrics from the default Prometheus instance deployed by this chart +- Remove `prometheus-operator/cleanup-crds.yaml` and `prometheus-operator/crds.yaml` from the Prometheus Operator upstream chart in favor of just using the CRD directory to install the CRDs. +- Added support for `rkeControllerManager`, `rkeScheduler`, `rkeProxy`, and `rkeEtcd` PushProx exporters for monitoring k8s components within RKE clusters +- Added support for a `k3sServer` PushProx exporter that monitors k3s server components (`kubeControllerManager`, `kubeScheduler`, and `kubeProxy`) within k3s clusters +- Added support for `kubeAdmControllerManager`, `kubeAdmScheduler`, `kubeAdmProxy`, and `kubeAdmEtcd` PushProx exporters for monitoring k8s components within kubeAdm clusters +- Added support for `rke2ControllerManager`, `rke2Scheduler`, `rke2Proxy`, and `rke2Etcd` PushProx exporters for monitoring k8s components within rke2 clusters +- Exposed `prometheus.prometheusSpec.ignoreNamespaceSelectors` on values.yaml and set it to `false` by default. 
This value instructs the default Prometheus server deployed with this chart to ignore the `namespaceSelector` field within any created ServiceMonitor or PodMonitor CRs that it selects. This prevents ServiceMonitors and PodMonitors from configuring the Prometheus scrape configuration to monitor resources outside the namespace that they are deployed in; if a user needs to have one ServiceMonitor / PodMonitor monitor resources within several namespaces (such as the resources that are used to monitor Istio in a default installation), they should not enable this option since it would require them to create one ServiceMonitor / PodMonitor CR per namespace that they would like to monitor. Relevant fields were also updated in the default README.md. +- Added `grafana.sidecar.dashboards.searchNamespace` to `values.yaml` with a default value of `cattle-dashboards`. The namespace provided should contain all ConfigMaps with the label `grafana_dashboard` and will be searched by the Grafana Dashboards sidecar for updates. The namespace specified is also created along with this deployment. All default dashboard ConfigMaps have been relocated from the deployment namespace to the namespace specified +- Added `monitoring-admin`, `monitoring-edit`, and `monitoring-view` default `ClusterRoles` to allow admins to assign roles to users to interact with Prometheus Operator CRs. These can be enabled by setting `.Values.global.rbac.userRoles.create` (default: `true`). In a typical RBAC setup, you might want to use a `ClusterRoleBinding` to bind these roles to a Subject to allow them to set up or view `ServiceMonitors` / `PodMonitors` / `PrometheusRules` and view `Prometheus` or `Alertmanager` CRs across the cluster. 
If `.Values.global.rbac.userRoles.aggregateRolesForRBAC` is enabled, these ClusterRoles will aggregate into the respective default ClusterRoles provided by Kubernetes +- Added `monitoring-config-admin`, `monitoring-config-edit` and `monitoring-config-view` default `Roles` to allow admins to assign roles to users to be able to edit / view `Secrets` and `ConfigMaps` within the `cattle-monitoring-system` namespace. These can be enabled by setting `.Values.global.rbac.userRoles.create` (default: `true`). In a typical RBAC setup, you might want to use a `RoleBinding` to bind these roles to a Subject within the `cattle-monitoring-system` namespace to allow them to modify Secrets / ConfigMaps tied to the deployment, such as your Alertmanager Config Secret. +- Added `monitoring-dashboard-admin`, `monitoring-dashboard-edit` and `monitoring-dashboard-view` default `Roles` to allow admins to assign roles to users to be able to edit / view `ConfigMaps` within the `cattle-dashboards` namespace. These can be enabled by setting `.Values.global.rbac.userRoles.create` (default: `true`) and deploying Grafana as part of this chart. In a typical RBAC setup, you might want to use a `RoleBinding` to bind these roles to a Subject within the `cattle-dashboards` namespace to allow them to create / modify ConfigMaps that contain the JSON used to persist Grafana Dashboards on the cluster. +- Added default resource limits for `Prometheus Operator`, `Prometheus`, `AlertManager`, `Grafana`, `kube-state-metrics`, `node-exporter` +- Added a default template `rancher_defaults.tmpl` to AlertManager that Rancher will offer to users in order to help configure the way alerts are rendered on a notifier. Also updated the default template deployed with this chart to reference that template and added an example of a Slack config using this template as a comment in the `values.yaml`. 
+- Added support for private registries via introducing a new field for `global.cattle.systemDefaultRegistry` that, if supplied, will automatically be prepended onto every image used by the chart. +- Added a default `nginx` proxy container deployed with Grafana whose config is set in the `ConfigMap` located in `charts/grafana/templates/nginx-config.yaml`. The purpose of this container is to make it possible to view Grafana's UI through a proxy that has a subpath (e.g. Rancher's proxy). This proxy container is set to listen on port `8080` (with a `portName` of `nginx-http` instead of the default `service`), which is also where the Grafana service will now point to, and will forward all requests to the Grafana container listening on the default port `3000`. +- Added a default `nginx` proxy container deployed with Prometheus whose config is set in the `ConfigMap` located in `templates/prometheus/nginx-config.yaml`. The purpose of this container is to make it possible to view Prometheus's UI through a proxy that has a subpath (e.g. Rancher's proxy). This proxy container is set to listen on port `8081` (with a `portName` of `nginx-http` instead of the default `web`), which is also where the Prometheus service will now point to, and will forward all requests to the Prometheus container listening on the default port `9090`. +- Added support for passing CIS Scans in a hardened cluster by introducing a Job that patches the default service account within the `cattle-monitoring-system` and `cattle-dashboards` namespaces on install or upgrade and adding a default allow all `NetworkPolicy` to the `cattle-monitoring-system` and `cattle-dashboards` namespaces. +### Modified +- Updated the chart name from `prometheus-operator` to `rancher-monitoring` and added the `io.rancher.certified: rancher` annotation to `Chart.yaml` +- Modified the default `node-exporter` port from `9100` to `9796` +- Modified the default `nameOverride` to `rancher-monitoring`. 
This change is necessary as the Prometheus Adapter's default URL (`http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc`) is based off of the value used here; if modified, the default Adapter URL must also be modified +- Modified the default `namespaceOverride` to `cattle-monitoring-system`. This change is necessary as the Prometheus Adapter's default URL (`http://{{ .Values.nameOverride }}-prometheus.{{ .Values.namespaceOverride }}.svc`) is based off of the value used here; if modified, the default Adapter URL must also be modified +- Configured some default values for `grafana.service` values and exposed them in the default README.md +- The default namespaces the following ServiceMonitors were changed from the deployment namespace to allow them to continue to monitor metrics when `prometheus.prometheusSpec.ignoreNamespaceSelectors` is enabled: + - `core-dns`: `kube-system` + - `api-server`: `default` + - `kube-controller-manager`: `kube-system` + - `kubelet`: `{{ .Values.kubelet.namespace }}` +- Disabled the following deployments by default (can be enabled if required): + - `AlertManager` + - `kube-controller-manager` metrics exporter + - `kube-etcd` metrics exporter + - `kube-scheduler` metrics exporter + - `kube-proxy` metrics exporter +- Updated default Grafana `deploymentStrategy` to `Recreate` to prevent deployments from being stuck on upgrade if a PV is attached to Grafana +- Modified the default `SelectorNilUsesHelmValues` to default to `false`. As a result, we look for all CRs with any labels in all namespaces by default rather than just the ones tagged with the label `release: rancher-monitoring`. +- Modified the default images used by the `rancher-monitoring` chart to point to Rancher mirrors of the original images from upstream. +- Modified the behavior of the chart to create the Alertmanager Config Secret via a pre-install hook instead of using the normal Helm lifecycle to manage the secret. 
The benefit of this approach is that all changes to the Config Secret done on a live cluster will never get overridden on a `helm upgrade` since the secret only gets created on a `helm install`. If you would like the secret to be cleaned up on an `helm uninstall`, enable `alertmanager.cleanupOnUninstall`; however, this is disabled by default to prevent the loss of alerting configuration on an uninstall. This secret will never be modified on a `helm upgrade`. +- Modified the default `securityContext` for `Pod` templates across the chart to `{"runAsNonRoot": "true", "runAsUser": "1000"}` and replaced `grafana.rbac.pspUseAppArmor` in favor of `grafana.rbac.pspAnnotations={}` in order to make it possible to deploy this chart on a hardened cluster which does not support Seccomp or AppArmor annotations in PSPs. Users can always choose to specify the annotations they want to use for the PSP directly as part of the values provided. +- Modified `.Values.prometheus.prometheusSpec.containers` to take in a string representing a template that should be rendered by Helm (via `tpl`) instead of allowing a user to provide YAML directly. +- Modified the default Grafana configuration to auto assign users who access Grafana to the Viewer role and enable anonymous access to Grafana dashboards by default. This default works well for a Rancher user who is accessing Grafana via the `kubectl proxy` on the Rancher Dashboard UI since anonymous users who enter via the proxy are authenticated by the k8s API Server, but you can / should modify this behavior if you plan on exposing Grafana in a way that does not require authentication (e.g. as a `NodePort` service). +- Modified the default Grafana configuration to add a default dashboard for Rancher on the Grafana home page. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/CONTRIBUTING.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/CONTRIBUTING.md new file mode 100755 index 000000000..f6ce2a323 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/CONTRIBUTING.md @@ -0,0 +1,12 @@ +# Contributing Guidelines + +## How to contribute to this chart + +1. Fork this repository, develop and test your Chart. +1. Bump the chart version for every change. +1. Ensure PR title has the prefix `[kube-prometheus-stack]` +1. When making changes to rules or dashboards, see the README.md section on how to sync data from upstream repositories +1. Check the `hack/minikube` folder has scripts to set up minikube and components of this chart that will allow all components to be scraped. You can use this configuration when validating your changes. +1. Check for changes of RBAC rules. +1. Check for changes in CRD specs. +1. PR must pass the linter (`helm lint`) diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/Chart.yaml new file mode 100755 index 000000000..7568a480b --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/Chart.yaml @@ -0,0 +1,103 @@ +annotations: + artifacthub.io/links: | + - name: Chart Source + url: https://github.com/prometheus-community/helm-charts + - name: Upstream Project + url: https://github.com/prometheus-operator/kube-prometheus + artifacthub.io/operator: "true" + catalog.cattle.io/auto-install: rancher-monitoring-crd=match + catalog.cattle.io/certified: rancher + catalog.cattle.io/display-name: Monitoring + catalog.cattle.io/namespace: cattle-monitoring-system + catalog.cattle.io/provides-gvr: monitoring.coreos.com.prometheus/v1 + catalog.cattle.io/release-name: rancher-monitoring + catalog.cattle.io/requests-cpu: 4500m + catalog.cattle.io/requests-memory: 4000Mi + catalog.cattle.io/ui-component: 
monitoring +apiVersion: v2 +appVersion: 0.46.0 +dependencies: +- condition: grafana.enabled + name: grafana + repository: file://./charts/grafana +- condition: k3sServer.enabled + name: k3sServer + repository: file://./charts/k3sServer +- condition: kubeStateMetrics.enabled + name: kube-state-metrics + repository: file://./charts/kube-state-metrics +- condition: kubeAdmControllerManager.enabled + name: kubeAdmControllerManager + repository: file://./charts/kubeAdmControllerManager +- condition: kubeAdmEtcd.enabled + name: kubeAdmEtcd + repository: file://./charts/kubeAdmEtcd +- condition: kubeAdmProxy.enabled + name: kubeAdmProxy + repository: file://./charts/kubeAdmProxy +- condition: kubeAdmScheduler.enabled + name: kubeAdmScheduler + repository: file://./charts/kubeAdmScheduler +- condition: prometheus-adapter.enabled + name: prometheus-adapter + repository: file://./charts/prometheus-adapter +- condition: nodeExporter.enabled + name: prometheus-node-exporter + repository: file://./charts/prometheus-node-exporter +- condition: rke2ControllerManager.enabled + name: rke2ControllerManager + repository: file://./charts/rke2ControllerManager +- condition: rke2Etcd.enabled + name: rke2Etcd + repository: file://./charts/rke2Etcd +- condition: rke2Proxy.enabled + name: rke2Proxy + repository: file://./charts/rke2Proxy +- condition: rke2Scheduler.enabled + name: rke2Scheduler + repository: file://./charts/rke2Scheduler +- condition: rkeControllerManager.enabled + name: rkeControllerManager + repository: file://./charts/rkeControllerManager +- condition: rkeEtcd.enabled + name: rkeEtcd + repository: file://./charts/rkeEtcd +- condition: rkeProxy.enabled + name: rkeProxy + repository: file://./charts/rkeProxy +- condition: rkeScheduler.enabled + name: rkeScheduler + repository: file://./charts/rkeScheduler +- condition: global.cattle.windows.enabled + name: windowsExporter + repository: file://./charts/windowsExporter +description: Collects several related Helm charts, 
Grafana dashboards, and Prometheus + rules combined with documentation and scripts to provide easy to operate end-to-end + Kubernetes cluster monitoring with Prometheus using the Prometheus Operator. +home: https://github.com/prometheus-operator/kube-prometheus +icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png +keywords: +- operator +- prometheus +- kube-prometheus +- monitoring +kubeVersion: '>=1.16.0-0' +maintainers: +- name: vsliouniaev +- name: bismarck +- email: gianrubio@gmail.com + name: gianrubio +- email: github.gkarthiks@gmail.com + name: gkarthiks +- email: scott@r6by.com + name: scottrigby +- email: miroslav.hadzhiev@gmail.com + name: Xtigyro +- email: arvind.iyengar@suse.com + name: Arvind +name: rancher-monitoring +sources: +- https://github.com/prometheus-community/helm-charts +- https://github.com/prometheus-operator/kube-prometheus +type: application +version: 14.5.100 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/README.md new file mode 100755 index 000000000..aa5d530f2 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/README.md @@ -0,0 +1,455 @@ +# kube-prometheus-stack + +Installs the [kube-prometheus stack](https://github.com/prometheus-operator/kube-prometheus), a collection of Kubernetes manifests, [Grafana](http://grafana.com/) dashboards, and [Prometheus rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) combined with documentation and scripts to provide easy to operate end-to-end Kubernetes cluster monitoring with [Prometheus](https://prometheus.io/) using the [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator). + +See the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) README for details about components, dashboards, and alerts. 
+ +_Note: This chart was formerly named `prometheus-operator` chart, now renamed to more clearly reflect that it installs the `kube-prometheus` project stack, within which Prometheus Operator is only one component._ + +## Prerequisites + +- Kubernetes 1.16+ +- Helm 3+ + +## Get Repo Info + +```console +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm repo update +``` + +_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Install Chart + +```console +# Helm +$ helm install [RELEASE_NAME] prometheus-community/kube-prometheus-stack +``` + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Dependencies + +By default this chart installs additional, dependent charts: + +- [kubernetes/kube-state-metrics](https://github.com/kubernetes/kube-state-metrics/tree/master/charts/kube-state-metrics) +- [prometheus-community/prometheus-node-exporter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter) +- [grafana/grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana) + +To disable dependencies during installation, see [multiple releases](#multiple-releases) below. + +_See [helm dependency](https://helm.sh/docs/helm/helm_dependency/) for command documentation._ + +## Uninstall Chart + +```console +# Helm +$ helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. 
+ +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +CRDs created by this chart are not removed by default and should be manually cleaned up: + +```console +kubectl delete crd alertmanagerconfigs.monitoring.coreos.com +kubectl delete crd alertmanagers.monitoring.coreos.com +kubectl delete crd podmonitors.monitoring.coreos.com +kubectl delete crd probes.monitoring.coreos.com +kubectl delete crd prometheuses.monitoring.coreos.com +kubectl delete crd prometheusrules.monitoring.coreos.com +kubectl delete crd servicemonitors.monitoring.coreos.com +kubectl delete crd thanosrulers.monitoring.coreos.com +``` + +## Upgrading Chart + +```console +# Helm +$ helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack +``` + +With Helm v3, CRDs created by this chart are not updated by default and should be manually updated. +Consult also the [Helm Documentation on CRDs](https://helm.sh/docs/chart_best_practices/custom_resource_definitions). + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions. + +### From 13.x to 14.x + +Version 14 upgrades prometheus-operator from 0.45.x to 0.46.x. 
Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRDs manually before updating: + +```console +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.46.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml +``` + +### From 12.x to 13.x + +Version 13 upgrades prometheus-operator from 0.44.x to 0.45.x. 
Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating: + +```console +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.45.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.45.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.45.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml +``` + +### From 11.x to 12.x + +The chart was migrated to support only helm v3 and later. + +### From 10.x to 11.x + +Version 11 upgrades prometheus-operator from 0.42.x to 0.43.x. Starting with 0.43.x an additional `AlertmanagerConfigs` CRD is introduced. Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating: + +```console +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.43/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml +``` + +Version 11 removes the deprecated tlsProxy via ghostunnel in favor of native TLS support the prometheus-operator gained with v0.39.0. + +### From 9.x to 10.x + +Version 10 upgrades prometheus-operator from 0.38.x to 0.42.x. Starting with 0.40.x an additional `Probes` CRD is introduced. 
Helm does not automatically upgrade or install new CRDs on a chart upgrade, so you have to install the CRD manually before updating: + +```console +kubectl apply -f https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/release-0.42/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml +``` + +### From 8.x to 9.x + +Version 9 of the helm chart removes the existing `additionalScrapeConfigsExternal` in favour of `additionalScrapeConfigsSecret`. This change lets users specify the secret name and secret key to use for the additional scrape configuration of prometheus. This is useful for users that have prometheus-operator as a subchart and also have a template that creates the additional scrape configuration. + +### From 7.x to 8.x + +Due to new template functions being used in the rules in version 8.x.x of the chart, an upgrade to Prometheus Operator and Prometheus is necessary in order to support them. First, upgrade to the latest version of 7.x.x + +```console +helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack --version 7.5.0 +``` + +Then upgrade to 8.x.x + +```console +helm upgrade [RELEASE_NAME] prometheus-community/kube-prometheus-stack --version [8.x.x] +``` + +Minimal recommended Prometheus version for this chart release is `2.12.x` + +### From 6.x to 7.x + +Due to a change in grafana subchart, version 7.x.x now requires Helm >= 2.12.0. + +### From 5.x to 6.x + +Due to a change in deployment labels of kube-state-metrics, the upgrade requires `helm upgrade --force` in order to re-create the deployment. 
If this is not done an error will occur indicating that the deployment cannot be modified: + +```console +invalid: spec.selector: Invalid value: v1.LabelSelector{MatchLabels:map[string]string{"app.kubernetes.io/name":"kube-state-metrics"}, MatchExpressions:[]v1.LabelSelectorRequirement(nil)}: field is immutable +``` + +If this error has already been encountered, a `helm history` command can be used to determine which release has worked, then `helm rollback` to the release, then `helm upgrade --force` to this new one + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments: + +```console +helm show values prometheus-community/kube-prometheus-stack +``` + +You may also run `helm show values` on this chart's [dependencies](#dependencies) for additional options. + +### Rancher Monitoring Configuration + +The following table shows values exposed by Rancher Monitoring's additions to the chart: + +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `nameOverride` | Provide a name that should be used instead of the chart name when naming all resources deployed by this chart |`"rancher-monitoring"`| +| `namespaceOverride` | Override the deployment namespace | `"cattle-monitoring-system"` | +| `global.rbac.userRoles.create` | Create default user ClusterRoles to allow users to interact with Prometheus CRs, ConfigMaps, and Secrets | `true` | +| `global.rbac.userRoles.aggregateToDefaultRoles` | Aggregate default user ClusterRoles into default k8s ClusterRoles | `true` | +| `prometheus-adapter.enabled` | Whether to install [prometheus-adapter](https://github.com/helm/charts/tree/master/stable/prometheus-adapter) within the cluster | `true` | +| `prometheus-adapter.prometheus.url` | A URL pointing to the Prometheus deployment within your cluster. 
The default value is set based on the assumption that you plan to deploy the default Prometheus instance from this chart where `.Values.namespaceOverride=cattle-monitoring-system` and `.Values.nameOverride=rancher-monitoring` | `http://rancher-monitoring-prometheus.cattle-monitoring-system.svc` | +| `prometheus-adapter.prometheus.port` | The port on the Prometheus deployment that Prometheus Adapter can make requests to | `9090` | +| `prometheus.prometheusSpec.ignoreNamespaceSelectors` | Ignore NamespaceSelector settings from the PodMonitor and ServiceMonitor configs. If true, PodMonitors and ServiceMonitors can only discover Pods and Services within the namespace they are deployed into | `false` | +| `alertmanager.secret.cleanupOnUninstall` | Whether or not to trigger a job to clean up the alertmanager config secret to be deleted on a `helm uninstall`. By default, this is disabled to prevent the loss of alerting configuration on an uninstall. | `false` | +| `alertmanager.secret.image.pullPolicy` | Image pull policy for job(s) related to alertmanager config secret's lifecycle | `IfNotPresent` | +| `alertmanager.secret.image.repository` | Repository to use for job(s) related to alertmanager config secret's lifecycle | `rancher/rancher-agent` | +| `alertmanager.secret.image.tag` | Tag to use for job(s) related to alertmanager config secret's lifecycle | `v2.4.8` | + +The following values are enabled for different distributions via [rancher-pushprox](https://github.com/rancher/dev-charts/tree/master/packages/rancher-pushprox). See the rancher-pushprox `README.md` for more information on what all values can be configured for the PushProxy chart. 
+ +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `rkeControllerManager.enabled` | Create a PushProx installation for monitoring kube-controller-manager metrics in RKE clusters | `false` | +| `rkeScheduler.enabled` | Create a PushProx installation for monitoring kube-scheduler metrics in RKE clusters | `false` | +| `rkeProxy.enabled` | Create a PushProx installation for monitoring kube-proxy metrics in RKE clusters | `false` | +| `rkeEtcd.enabled` | Create a PushProx installation for monitoring etcd metrics in RKE clusters | `false` | +| `k3sServer.enabled` | Create a PushProx installation for monitoring k3s-server metrics (accounts for kube-controller-manager, kube-scheduler, and kube-proxy metrics) in k3s clusters | `false` | +| `kubeAdmControllerManager.enabled` | Create a PushProx installation for monitoring kube-controller-manager metrics in kubeAdm clusters | `false` | +| `kubeAdmScheduler.enabled` | Create a PushProx installation for monitoring kube-scheduler metrics in kubeAdm clusters | `false` | +| `kubeAdmProxy.enabled` | Create a PushProx installation for monitoring kube-proxy metrics in kubeAdm clusters | `false` | +| `kubeAdmEtcd.enabled` | Create a PushProx installation for monitoring etcd metrics in kubeAdm clusters | `false` | + + +### Multiple releases + +The same chart can be used to run multiple Prometheus instances in the same cluster if required. To achieve this, it is necessary to run only one instance of prometheus-operator and a pair of alertmanager pods for an HA configuration, while all other components need to be disabled. To disable a dependency during installation, set `kubeStateMetrics.enabled`, `nodeExporter.enabled` and `grafana.enabled` to `false`. 
+ +## Work-Arounds for Known Issues + +### Running on private GKE clusters + +When Google configure the control plane for private clusters, they automatically configure VPC peering between your Kubernetes cluster’s network and a separate Google managed project. In order to restrict what Google are able to access within your cluster, the firewall rules configured restrict access to your Kubernetes pods. This means that in order to use the webhook component with a GKE private cluster, you must configure an additional firewall rule to allow the GKE control plane access to your webhook pod. + +You can read more information on how to add firewall rules for the GKE control plane nodes in the [GKE docs](https://cloud.google.com/kubernetes-engine/docs/how-to/private-clusters#add_firewall_rules) + +Alternatively, you can disable the hooks by setting `prometheusOperator.admissionWebhooks.enabled=false`. + +## PrometheusRules Admission Webhooks + +With Prometheus Operator version 0.30+, the core Prometheus Operator pod exposes an endpoint that will integrate with the `validatingwebhookconfiguration` Kubernetes feature to prevent malformed rules from being added to the cluster. + +### How the Chart Configures the Hooks + +A validating and mutating webhook configuration requires the endpoint to which the request is sent to use TLS. It is possible to set up custom certificates to do this, but in most cases, a self-signed certificate is enough. The setup of this component requires some more complex orchestration when using helm. The steps are created to be idempotent and to allow turning the feature on and off without running into helm quirks. + +1. A pre-install hook provisions a certificate into the same namespace using a format compatible with provisioning using end-user certificates. If the certificate already exists, the hook exits. +2. The prometheus operator pod is configured to use a TLS proxy container, which will load that certificate. +3. 
Validating and Mutating webhook configurations are created in the cluster, with their failure mode set to Ignore. This allows rules to be created by the same chart at the same time, even though the webhook has not yet been fully set up - it does not have the correct CA field set. +4. A post-install hook reads the CA from the secret created by step 1 and patches the Validating and Mutating webhook configurations. This process will allow a custom CA provisioned by some other process to also be patched into the webhook configurations. The chosen failure policy is also patched into the webhook configurations + +### Alternatives + +It should be possible to use [jetstack/cert-manager](https://github.com/jetstack/cert-manager) if a more complete solution is required, but it has not been tested. + +You can enable automatic self-signed TLS certificate provisioning via cert-manager by setting the `prometheusOperator.admissionWebhooks.certManager.enabled` value to true. + +### Limitations + +Because the operator can only run as a single pod, there is potential for this component failure to cause rule deployment failure. Because this risk is outweighed by the benefit of having validation, the feature is enabled by default. + +## Developing Prometheus Rules and Grafana Dashboards + +This chart Grafana Dashboards and Prometheus Rules are just a copy from [prometheus-operator/prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) and other sources, synced (with alterations) by scripts in [hack](hack) folder. In order to introduce any changes you need to first [add them to the original repo](https://github.com/prometheus-operator/kube-prometheus/blob/master/docs/developing-prometheus-rules-and-grafana-dashboards.md) and then sync there by scripts. 
+ +## Further Information + +For more in-depth documentation of configuration options meanings, please see + +- [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator) +- [Prometheus](https://prometheus.io/docs/introduction/overview/) +- [Grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana#grafana-helm-chart) + +## prometheus.io/scrape + +The prometheus operator does not support annotation-based discovery of services, using the `PodMonitor` or `ServiceMonitor` CRD in its place as they provide far more configuration options. +For information on how to use PodMonitors/ServiceMonitors, please see the documentation on the `prometheus-operator/prometheus-operator` documentation here: + +- [ServiceMonitors](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/getting-started.md#include-servicemonitors) +- [PodMonitors](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/getting-started.md#include-podmonitors) +- [Running Exporters](https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/running-exporters.md) + +By default, Prometheus discovers PodMonitors and ServiceMonitors within its namespace, that are labeled with the same release tag as the prometheus-operator release. +Sometimes, you may need to discover custom PodMonitors/ServiceMonitors, for example used to scrape data from third-party applications. +An easy way of doing this, without compromising the default PodMonitors/ServiceMonitors discovery, is allowing Prometheus to discover all PodMonitors/ServiceMonitors within its namespace, without applying label filtering. +To do so, you can set `prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues` and `prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues` to `false`. 
+ +## Migrating from stable/prometheus-operator chart + +## Zero downtime + +Since `kube-prometheus-stack` is fully compatible with the `stable/prometheus-operator` chart, a migration without downtime can be achieved. +However, the old name prefix needs to be kept. If you want the new name please follow the step by step guide below (with downtime). + +You can override the name to achieve this: + +```console +helm upgrade prometheus-operator prometheus-community/kube-prometheus-stack -n monitoring --reuse-values --set nameOverride=prometheus-operator +``` + +**Note**: It is recommended to run this first with `--dry-run --debug`. + +## Redeploy with new name (downtime) + +If the **prometheus-operator** values are compatible with the new **kube-prometheus-stack** chart, please follow the below steps for migration: + +> The guide presumes that chart is deployed in `monitoring` namespace and the deployments are running there. If in another namespace, please replace `monitoring` with the deployed namespace. + +1. Patch the PersistentVolume created/used by the prometheus-operator chart to `Retain` claim policy: + + ```console + kubectl patch pv/ -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' + ``` + + **Note:** To execute the above command, the user must have a cluster wide permission. Please refer to [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) + +2. Uninstall the **prometheus-operator** release and delete the existing PersistentVolumeClaim, and verify that the PV becomes Released. + + ```console + helm uninstall prometheus-operator -n monitoring + kubectl delete pvc/ -n monitoring + ``` + + Additionally, you have to manually remove the remaining `prometheus-operator-kubelet` service. + + ```console + kubectl delete service/prometheus-operator-kubelet -n kube-system + ``` + + You can choose to remove all your existing CRDs (ServiceMonitors, Podmonitors, etc.) if you want to. + +3.
Remove current `spec.claimRef` values to change the PV's status from Released to Available. + + ```console + kubectl patch pv/ --type json -p='[{"op": "remove", "path": "/spec/claimRef"}]' -n monitoring + ``` + +**Note:** To execute the above command, the user must have a cluster wide permission. Please refer to [Kubernetes RBAC](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) + +After these steps, proceed to a fresh **kube-prometheus-stack** installation and make sure the current release of **kube-prometheus-stack** matches the `volumeClaimTemplate` values in the `values.yaml`. + +The binding is done via matching a specific amount of storage requested and with certain access modes. + +For example, if you had storage specified as this with **prometheus-operator**: + +```yaml +volumeClaimTemplate: + spec: + storageClassName: gp2 + accessModes: ["ReadWriteOnce"] + resources: + requests: + storage: 50Gi +``` + +You have to specify matching `volumeClaimTemplate` with 50Gi storage and `ReadWriteOnce` access mode. + +Additionally, you should check the current AZ of your legacy installation's PV, and configure the fresh release to use the same AZ as the old one. If the pods are in a different AZ than the PV, the release will fail to bind the existing one, hence creating a new PV. + +This can be achieved either by specifying the labels through `values.yaml`, e.g. setting `prometheus.prometheusSpec.nodeSelector` to: + +```yaml +nodeSelector: + failure-domain.beta.kubernetes.io/zone: east-west-1a +``` + +or passing these values as `--set` overrides during installation. + +The new release should now re-attach your previously released PV with its content. + +## Migrating from coreos/prometheus-operator chart + +The multiple charts have been combined into a single chart that installs prometheus operator, prometheus, alertmanager, grafana as well as the multitude of exporters necessary to monitor a cluster.
+ +There is no simple and direct migration path between the charts as the changes are extensive and intended to make the chart easier to support. + +The capabilities of the old chart are all available in the new chart, including the ability to run multiple prometheus instances on a single cluster - you will need to disable the parts of the chart you do not wish to deploy. + +You can check out the tickets for this change [here](https://github.com/prometheus-operator/prometheus-operator/issues/592) and [here](https://github.com/helm/charts/pull/6765). + +### High-level overview of Changes + +#### Added dependencies + +The chart has added 3 [dependencies](#dependencies). + +- Node-Exporter, Kube-State-Metrics: These components are loaded as dependencies into the chart, and are relatively simple components +- Grafana: The Grafana chart is more feature-rich than this chart - it contains a sidecar that is able to load data sources and dashboards from configmaps deployed into the same cluster. For more information check out the [documentation for the chart](https://github.com/grafana/helm-charts/blob/main/charts/grafana/README.md) + +#### Kubelet Service + +Because the kubelet service has a new name in the chart, make sure to clean up the old kubelet service in the `kube-system` namespace to prevent counting container metrics twice. + +#### Persistent Volumes + +If you would like to keep the data of the current persistent volumes, it should be possible to attach existing volumes to new PVCs and PVs that are created using the conventions in the new chart. 
For example, in order to use an existing Azure disk for a helm release called `prometheus-migration` the following resources can be created: + +```yaml +apiVersion: v1 +kind: PersistentVolume +metadata: + name: pvc-prometheus-migration-prometheus-0 +spec: + accessModes: + - ReadWriteOnce + azureDisk: + cachingMode: None + diskName: pvc-prometheus-migration-prometheus-0 + diskURI: /subscriptions/f5125d82-2622-4c50-8d25-3f7ba3e9ac4b/resourceGroups/sample-migration-resource-group/providers/Microsoft.Compute/disks/pvc-prometheus-migration-prometheus-0 + fsType: "" + kind: Managed + readOnly: false + capacity: + storage: 1Gi + persistentVolumeReclaimPolicy: Delete + storageClassName: prometheus + volumeMode: Filesystem +``` + +```yaml +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + labels: + app: prometheus + prometheus: prometheus-migration-prometheus + name: prometheus-prometheus-migration-prometheus-db-prometheus-prometheus-migration-prometheus-0 + namespace: monitoring +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + storageClassName: prometheus + volumeMode: Filesystem + volumeName: pvc-prometheus-migration-prometheus-0 +``` + +The PVC will take ownership of the PV and when you create a release using a persistent volume claim template it will use the existing PVCs as they match the naming convention used by the chart. For other cloud providers similar approaches can be used. + +#### KubeProxy + +The metrics bind address of kube-proxy defaults to `127.0.0.1:10249`, which prometheus instances **cannot** access. You should expose metrics by changing the `metricsBindAddress` field value to `0.0.0.0:10249` if you want to collect them. + +Depending on the cluster, the relevant part `config.conf` will be in ConfigMap `kube-system/kube-proxy` or `kube-system/kube-proxy-config`.
For example: + +```console +kubectl -n kube-system edit cm kube-proxy +``` + +```yaml +apiVersion: v1 +data: + config.conf: |- + apiVersion: kubeproxy.config.k8s.io/v1alpha1 + kind: KubeProxyConfiguration + # ... + # metricsBindAddress: 127.0.0.1:10249 + metricsBindAddress: 0.0.0.0:10249 + # ... + kubeconfig.conf: |- + # ... +kind: ConfigMap +metadata: + labels: + app: kube-proxy + name: kube-proxy + namespace: kube-system +``` diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/app-README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/app-README.md new file mode 100755 index 000000000..af77e04ec --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/app-README.md @@ -0,0 +1,15 @@ +# Rancher Monitoring and Alerting + + This chart is based on the upstream [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) chart. The chart deploys [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator) and its CRDs along with [Grafana](https://github.com/grafana/helm-charts/tree/main/charts/grafana), [Prometheus Adapter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-adapter) and additional charts / Kubernetes manifests to gather metrics. It allows users to monitor their Kubernetes clusters, view metrics in Grafana dashboards, and set up alerts and notifications. + +For more information on how to use the feature, refer to our [docs](https://rancher.com/docs/rancher/v2.x/en/monitoring-alerting/v2.5/). + +The chart installs the following components: + +- [Prometheus Operator](https://github.com/coreos/prometheus-operator) - The operator provides easy monitoring definitions for Kubernetes services, manages [Prometheus](https://prometheus.io/) and [AlertManager](https://prometheus.io/docs/alerting/latest/alertmanager/) instances, and adds default scrape targets for some Kubernetes components. 
+- [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus/) - A collection of community-curated Kubernetes manifests, Grafana Dashboards, and PrometheusRules that deploy a default end-to-end cluster monitoring configuration. +- [Grafana](https://github.com/helm/charts/tree/master/stable/grafana) - Grafana allows a user to create / view dashboards based on the cluster metrics collected by Prometheus. +- [node-exporter](https://github.com/helm/charts/tree/master/stable/prometheus-node-exporter) / [kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) / [rancher-pushprox](https://github.com/rancher/charts/tree/dev-v2.5/packages/rancher-pushprox/charts) - These charts monitor various Kubernetes components across different Kubernetes cluster types. +- [Prometheus Adapter](https://github.com/helm/charts/tree/master/stable/prometheus-adapter) - The adapter allows a user to expose custom metrics, resource metrics, and external metrics on the default [Prometheus](https://prometheus.io/) instance to the Kubernetes API Server. + +For more information, review the Helm README of this chart. diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/.helmignore b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/.helmignore new file mode 100755 index 000000000..8cade1318 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.vscode +.project +.idea/ +*.tmproj +OWNERS diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/Chart.yaml new file mode 100755 index 000000000..6f950a023 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/Chart.yaml @@ -0,0 +1,28 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-grafana +apiVersion: v2 +appVersion: 7.4.5 +description: The leading tool for querying and visualizing time series and metrics. +home: https://grafana.net +icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png +kubeVersion: ^1.8.0-0 +maintainers: +- email: zanhsieh@gmail.com + name: zanhsieh +- email: rluckie@cisco.com + name: rtluckie +- email: maor.friedman@redhat.com + name: maorfr +- email: miroslav.hadzhiev@gmail.com + name: Xtigyro +- email: mail@torstenwalter.de + name: torstenwalter +name: grafana +sources: +- https://github.com/grafana/grafana +type: application +version: 6.6.4 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/README.md new file mode 100755 index 000000000..957f019ec --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/README.md @@ -0,0 +1,514 @@ +# Grafana Helm Chart + +* Installs the web dashboarding system [Grafana](http://grafana.org/) + +## Get Repo Info + +```console +helm repo add grafana https://grafana.github.io/helm-charts +helm repo update +``` + +_See [helm 
repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release grafana/grafana +``` + +## Uninstalling the Chart + +To uninstall/delete the my-release deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 4.0.0 (And 3.12.1) + +This version requires Helm >= 2.12.0. + +### To 5.0.0 + +You have to add --force to your helm upgrade command as the labels of the chart have changed. + +### To 6.0.0 + +This version requires Helm >= 3.1.0. + +## Configuration + +| Parameter | Description | Default | +|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------| +| `replicas` | Number of nodes | `1` | +| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` | +| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` | +| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` | +| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` | +| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`| +| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "runAsGroup": 472, "fsGroup": 472}` | +| `priorityClassName` | Name of Priority Class to assign pods | `nil` | +| `image.repository` | Image repository | `grafana/grafana` | +| `image.tag` | Image tag (`Must be >= 5.0.0`) | `7.4.5` | +| `image.sha` | Image 
sha (optional) | `2b56f6106ddc376bb46d974230d530754bf65a640dfbc5245191d72d3b49efc6` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Image pull secrets | `{}` | +| `service.type` | Kubernetes service type | `ClusterIP` | +| `service.port` | Kubernetes port where service is exposed | `80` | +| `service.portName` | Name of the port on the service | `service` | +| `service.targetPort` | Internal service is port | `3000` | +| `service.nodePort` | Kubernetes service nodePort | `nil` | +| `service.annotations` | Service annotations | `{}` | +| `service.labels` | Custom labels | `{}` | +| `service.clusterIP` | internal cluster service IP | `nil` | +| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `nil` | +| `service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to lb (if supported) | `[]` | +| `service.externalIPs` | service external IP addresses | `[]` | +| `extraExposePorts` | Additional service ports for sidecar containers| `[]` | +| `hostAliases` | adds rules to the pod's /etc/hosts | `[]` | +| `ingress.enabled` | Enables Ingress | `false` | +| `ingress.annotations` | Ingress annotations (values are templated) | `{}` | +| `ingress.labels` | Custom labels | `{}` | +| `ingress.path` | Ingress accepted path | `/` | +| `ingress.pathType` | Ingress type of path | `Prefix` | +| `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` | +| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions). 
| `[]` | +| `ingress.tls` | Ingress TLS configuration | `[]` | +| `resources` | CPU/Memory resource requests/limits | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Toleration labels for pod assignment | `[]` | +| `affinity` | Affinity settings for pod assignment | `{}` | +| `extraInitContainers` | Init containers to add to the grafana pod | `{}` | +| `extraContainers` | Sidecar containers to add to the grafana pod | `{}` | +| `extraContainerVolumes` | Volumes that can be mounted in sidecar containers | `[]` | +| `extraLabels` | Custom labels for all manifests | `{}` | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `persistence.enabled` | Use persistent volume to store data | `false` | +| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `pvc` | +| `persistence.size` | Size of persistent volume claim | `10Gi` | +| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` | +| `persistence.storageClassName` | Type of persistent volume claim | `nil` | +| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` | +| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` | +| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` | +| `persistence.subPath` | Mount a sub dir of the persistent volume | `nil` | +| `persistence.inMemory.enabled` | If persistence is not enabled, whether to mount the local storage in-memory to improve performance | `false` | +| `persistence.inMemory.sizeLimit` | SizeLimit for the in-memory local storage | `nil` | +| `initChownData.enabled` | If false, don't reset data ownership at startup | true | +| `initChownData.image.repository` | init-chown-data container image repository | `busybox` | +| `initChownData.image.tag` | init-chown-data container image tag | `1.31.1` | +| `initChownData.image.sha` | init-chown-data container image sha (optional)| `""` | +| 
`initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` | +| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` | +| `schedulerName` | Alternate scheduler name | `nil` | +| `env` | Extra environment variables passed to pods | `{}` | +| `envValueFrom` | Environment variables from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. | `{}` | +| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` | +| `envRenderSecret` | Sensible environment variables passed to pods and stored as secret | `{}` | +| `extraSecretMounts` | Additional grafana server secret mounts | `[]` | +| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` | +| `extraConfigmapMounts` | Additional grafana server configMap volume mounts | `[]` | +| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` | +| `plugins` | Plugins to be loaded along with Grafana | `[]` | +| `datasources` | Configure grafana datasources (passed through tpl) | `{}` | +| `notifiers` | Configure grafana notifiers | `{}` | +| `dashboardProviders` | Configure grafana dashboard providers | `{}` | +| `dashboards` | Dashboards to import | `{}` | +| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` | +| `grafana.ini` | Grafana's primary configuration | `{}` | +| `ldap.enabled` | Enable LDAP authentication | `false` | +| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. 
| `""` | +| `ldap.config` | Grafana's LDAP configuration | `""` | +| `annotations` | Deployment annotations | `{}` | +| `labels` | Deployment labels | `{}` | +| `podAnnotations` | Pod annotations | `{}` | +| `podLabels` | Pod labels | `{}` | +| `podPortName` | Name of the grafana port on the pod | `grafana` | +| `sidecar.image.repository` | Sidecar image repository | `quay.io/kiwigrid/k8s-sidecar` | +| `sidecar.image.tag` | Sidecar image tag | `1.10.7` | +| `sidecar.image.sha` | Sidecar image sha (optional) | `""` | +| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` | +| `sidecar.resources` | Sidecar resources | `{}` | +| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable | `false` | +| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` | +| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` | +| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` | +| `sidecar.dashboards.provider.orgid` | Id of the organisation, to which the dashboards should be added | `1` | +| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` | +| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` | +| `sidecar.dashboards.provider.allowUiUpdates` | Allow updating provisioned dashboards from the UI | `false` | +| `sidecar.dashboards.provider.type` | Provider type | `file` | +| `sidecar.dashboards.provider.foldersFromFilesStructure` | Allow Grafana to replicate dashboard structure from filesystem. | `false` | +| `sidecar.dashboards.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. 
| `WATCH` | +| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` | +| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` | +| `sidecar.dashboards.labelValue` | Label value that config maps with dashboards should have to be added | `nil` | +| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` | +| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` | +| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` | +| `sidecar.dashboards.searchNamespace` | If specified, the sidecar will search for dashboard config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` | +| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` | +| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` | +| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `nil` | +| `sidecar.datasources.searchNamespace` | If specified, the sidecar will search for datasources config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. 
It's also possible to specify ALL to search in all namespaces | `nil` | +| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` | +| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` | +| `sidecar.notifiers.searchNamespace` | If specified, the sidecar will search for notifiers config-maps (or secrets) inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` | +| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` | +| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` | +| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` | +| `admin.existingSecret` | The name of an existing secret containing the admin credentials. | `""` | +| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` | +| `admin.passwordKey` | The key in the existing admin secret containing the password. 
| `"admin-password"` | +| `serviceAccount.annotations` | ServiceAccount annotations | | +| `serviceAccount.create` | Create service account | `true` | +| `serviceAccount.name` | Service account name to use, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` | +| `serviceAccount.nameTest` | Service account name to use for test, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `nil` | +| `rbac.create` | Create and use RBAC resources | `true` | +| `rbac.namespaced` | Creates Role and Rolebinding instead of the default ClusterRole and ClusteRoleBindings for the grafana instance | `false` | +| `rbac.useExistingRole` | Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. | `nil` | +| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `true` | +| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `true` | +| `rbac.extraRoleRules` | Additional rules to add to the Role | [] | +| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] | +| `command` | Define command to be executed by grafana container at startup | `nil` | +| `testFramework.enabled` | Whether to create test-related resources | `true` | +| `testFramework.image` | `test-framework` image repository. | `bats/bats` | +| `testFramework.tag` | `test-framework` image tag. | `v1.1.0` | +| `testFramework.imagePullPolicy` | `test-framework` image pull policy. 
| `IfNotPresent` | +| `testFramework.securityContext` | `test-framework` securityContext | `{}` | +| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` | +| `downloadDashboards.envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` | +| `downloadDashboards.resources` | Resources of `download-dashboards` container | `{}` | +| `downloadDashboardsImage.repository` | Curl docker image repo | `curlimages/curl` | +| `downloadDashboardsImage.tag` | Curl docker image tag | `7.73.0` | +| `downloadDashboardsImage.sha` | Curl docker image sha (optional) | `""` | +| `downloadDashboardsImage.pullPolicy` | Curl docker image pull policy | `IfNotPresent` | +| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) | +| `serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` | +| `serviceMonitor.namespace` | Namespace this servicemonitor is installed in | | +| `serviceMonitor.interval` | How frequently Prometheus should scrape | `1m` | +| `serviceMonitor.path` | Path to scrape | `/metrics` | +| `serviceMonitor.scheme` | Scheme to use for metrics scraping | `http` | +| `serviceMonitor.tlsConfig` | TLS configuration block for the endpoint | `{}` | +| `serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` | +| `serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `30s` | +| `serviceMonitor.relabelings` | MetricRelabelConfigs to apply to samples before ingestion. 
| `[]` | +| `revisionHistoryLimit` | Number of old ReplicaSets to retain | `10` | +| `imageRenderer.enabled` | Enable the image-renderer deployment & service | `false` | +| `imageRenderer.image.repository` | image-renderer Image repository | `grafana/grafana-image-renderer` | +| `imageRenderer.image.tag` | image-renderer Image tag | `latest` | +| `imageRenderer.image.sha` | image-renderer Image sha (optional) | `""` | +| `imageRenderer.image.pullPolicy` | image-renderer ImagePullPolicy | `Always` | +| `imageRenderer.env` | extra env-vars for image-renderer | `{}` | +| `imageRenderer.serviceAccountName` | image-renderer deployment serviceAccountName | `""` | +| `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` | +| `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` | +| `imageRenderer.priorityClassName` | image-renderer deployment priority class | `''` | +| `imageRenderer.service.portName` | image-renderer service port name | `'http'` | +| `imageRenderer.service.port` | image-renderer service port used by both service and deployment | `8081` | +| `imageRenderer.grafanaSubPath` | Grafana sub path to use for image renderer callback url | `''` | +| `imageRenderer.podPortName` | name of the image-renderer port on the pod | `http` | +| `imageRenderer.revisionHistoryLimit` | number of image-renderer replica sets to keep | `10` | +| `imageRenderer.networkPolicy.limitIngress` | Enable a NetworkPolicy to limit inbound traffic from only the created grafana pods | `true` | +| `imageRenderer.networkPolicy.limitEgress` | Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods | `false` | +| `imageRenderer.resources` | Set resource limits for image-renderer pdos | `{}` | + +### Example ingress with path + +With grafana 6.3 and above +```yaml +grafana.ini: + server: + domain: monitoring.example.com + root_url: "%(protocol)s://%(domain)s/grafana" + serve_from_sub_path: true +ingress: + enabled: true 
+ hosts: + - "monitoring.example.com" + path: "/grafana" +``` + +### Example of extraVolumeMounts + +```yaml +- extraVolumeMounts: + - name: plugins + mountPath: /var/lib/grafana/plugins + subPath: configs/grafana/plugins + existingClaim: existing-grafana-claim + readOnly: false +``` + +## Import dashboards + +There are a few methods to import dashboards to Grafana. Below are some examples and explanations as to how to use each method: + +```yaml +dashboards: + default: + some-dashboard: + json: | + { + "annotations": + + ... + # Complete json file here + ... + + "title": "Some Dashboard", + "uid": "abcd1234", + "version": 1 + } + custom-dashboard: + # This is a path to a file inside the dashboards directory inside the chart directory + file: dashboards/custom-dashboard.json + prometheus-stats: + # Ref: https://grafana.com/dashboards/2 + gnetId: 2 + revision: 2 + datasource: Prometheus + local-dashboard: + url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json +``` + +## BASE64 dashboards + +Dashboards could be stored on a server that does not return JSON directly and instead of it returns a Base64 encoded file (e.g. Gerrit) +A new parameter has been added to the url use case so if you specify a b64content value equals to true after the url entry a Base64 decoding is applied before save the file to disk. +If this entry is not set or is equals to false not decoding is applied to the file before saving it to disk. + +### Gerrit use case + +Gerrit API for download files has the following schema: where {project-name} and +{file-id} usually has '/' in their values and so they MUST be replaced by %2F so if project-name is user/repo, branch-id is master and file-id is equals to dir1/dir2/dashboard +the url value is + +## Sidecar for dashboards + +If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana +pod. 
This container watches all configmaps (or secrets) in the cluster and filters out the ones with +a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written +to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported +dashboards are deleted/updated. + +A recommendation is to use one configmap per dashboard, as a reduction of multiple dashboards inside +one configmap is currently not properly mirrored in grafana. + +Example dashboard config: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: sample-grafana-dashboard + labels: + grafana_dashboard: "1" +data: + k8s-dashboard.json: |- + [...] +``` + +## Sidecar for datasources + +If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana +pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and +filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in +those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, +the data sources in grafana can be imported. + +Secrets are recommended over configmaps for this usecase because datasources usually contain private +data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those. + +Example values to add a datasource adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file): + +```yaml +datasources: + datasources.yaml: + apiVersion: 1 + datasources: + # name of the datasource. Required + - name: Graphite + # datasource type. Required + type: graphite + # access mode. proxy or direct (Server or Browser in the UI). Required + access: proxy + # org id. 
will default to orgId 1 if not specified + orgId: 1 + # url + url: http://localhost:8080 + # database password, if used + password: + # database user, if used + user: + # database name, if used + database: + # enable/disable basic auth + basicAuth: + # basic auth username + basicAuthUser: + # basic auth password + basicAuthPassword: + # enable/disable with credentials headers + withCredentials: + # mark as default datasource. Max one per org + isDefault: + # fields that will be converted to json and stored in json_data + jsonData: + graphiteVersion: "1.1" + tlsAuth: true + tlsAuthWithCACert: true + # json object of data that will be encrypted. + secureJsonData: + tlsCACert: "..." + tlsClientCert: "..." + tlsClientKey: "..." + version: 1 + # allow users to edit datasources from the UI. + editable: false +``` + +## Sidecar for notifiers + +If the parameter `sidecar.notifiers.enabled` is set, an init container is deployed in the grafana +pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and +filters out the ones with a label as defined in `sidecar.notifiers.label`. The files defined in +those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, +the notification channels in grafana can be imported. The secrets must be created before +`helm install` so that the notifiers init container can list the secrets. + +Secrets are recommended over configmaps for this usecase because alert notification channels usually contain +private data like SMTP usernames and passwords. Secrets are the more appropriate cluster resource to manage those. + +Example datasource config adapted from [Grafana](https://grafana.com/docs/grafana/latest/administration/provisioning/#alert-notification-channels): + +```yaml +notifiers: + - name: notification-channel-1 + type: slack + uid: notifier1 + # either + org_id: 2 + # or + org_name: Main Org. 
+ is_default: true + send_reminder: true + frequency: 1h + disable_resolve_message: false + # See `Supported Settings` section for settings supporter for each + # alert notification type. + settings: + recipient: 'XXX' + token: 'xoxb' + uploadImage: true + url: https://slack.com + +delete_notifiers: + - name: notification-channel-1 + uid: notifier1 + org_id: 2 + - name: notification-channel-2 + # default org_id: 1 +``` + +## How to serve Grafana with a path prefix (/grafana) + +In order to serve Grafana with a prefix (e.g., ), add the following to your values.yaml. + +```yaml +ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/use-regex: "true" + + path: /grafana/?(.*) + hosts: + - k8s.example.dev + +grafana.ini: + server: + root_url: http://localhost:3000/grafana # this host can be localhost +``` + +## How to securely reference secrets in grafana.ini + +This example uses Grafana uses [file providers](https://grafana.com/docs/grafana/latest/administration/configuration/#file-provider) for secret values and the `extraSecretMounts` configuration flag (Additional grafana server secret mounts) to mount the secrets. 
+ +In grafana.ini: + +```yaml +grafana.ini: + [auth.generic_oauth] + enabled = true + client_id = $__file{/etc/secrets/auth_generic_oauth/client_id} + client_secret = $__file{/etc/secrets/auth_generic_oauth/client_secret} +``` + +Existing secret, or created along with helm: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: auth-generic-oauth-secret +type: Opaque +stringData: + client_id: + client_secret: +``` + +Include in the `extraSecretMounts` configuration flag: + +```yaml +- extraSecretMounts: + - name: auth-generic-oauth-secret-mount + secretName: auth-generic-oauth-secret + defaultMode: 0440 + mountPath: /etc/secrets/auth_generic_oauth + readOnly: true +``` + +### extraSecretMounts using a Container Storage Interface (CSI) provider + +This example uses a CSI driver e.g. retrieving secrets using [Azure Key Vault Provider](https://github.com/Azure/secrets-store-csi-driver-provider-azure) + +```yaml +- extraSecretMounts: + - name: secrets-store-inline + mountPath: /run/secrets + readOnly: true + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: "my-provider" + nodePublishSecretRef: + name: akv-creds +``` + +## Image Renderer Plug-In + +This chart supports enabling [remote image rendering](https://github.com/grafana/grafana-image-renderer/blob/master/docs/remote_rendering_using_docker.md) + +```yaml +imageRenderer: + enabled: true +``` + +### Image Renderer NetworkPolicy + +By default the image-renderer pods will have a network policy which only allows ingress traffic from the created grafana instance diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/dashboards/custom-dashboard.json b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/dashboards/custom-dashboard.json new file mode 100755 index 000000000..9e26dfeeb --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/dashboards/custom-dashboard.json @@ -0,0 +1 @@ 
+{} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/NOTES.txt b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/NOTES.txt new file mode 100755 index 000000000..1fc8436d9 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/NOTES.txt @@ -0,0 +1,54 @@ +1. Get your '{{ .Values.adminUser }}' user password by running: + + kubectl get secret --namespace {{ template "grafana.namespace" . }} {{ template "grafana.fullname" . }} -o jsonpath="{.data.admin-password}" | base64 --decode ; echo + +2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: + + {{ template "grafana.fullname" . }}.{{ template "grafana.namespace" . }}.svc.cluster.local +{{ if .Values.ingress.enabled }} + If you bind grafana to 80, please update values in values.yaml and reinstall: + ``` + securityContext: + runAsUser: 0 + runAsGroup: 0 + fsGroup: 0 + + command: + - "setcap" + - "'cap_net_bind_service=+ep'" + - "/usr/sbin/grafana-server &&" + - "sh" + - "/run.sh" + ``` + Details refer to https://grafana.com/docs/installation/configuration/#http-port. + Or grafana would always crash. + + From outside the cluster, the server URL(s) are: +{{- range .Values.ingress.hosts }} + http://{{ . }} +{{- end }} +{{ else }} + Get the Grafana URL to visit by running these commands in the same shell: +{{ if contains "NodePort" .Values.service.type -}} + export NODE_PORT=$(kubectl get --namespace {{ template "grafana.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ template "grafana.namespace" . 
}} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{ else if contains "LoadBalancer" .Values.service.type -}} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc --namespace {{ template "grafana.namespace" . }} -w {{ template "grafana.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ template "grafana.namespace" . }} {{ template "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + http://$SERVICE_IP:{{ .Values.service.port -}} +{{ else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ template "grafana.namespace" . }} -l "app.kubernetes.io/name={{ template "grafana.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ template "grafana.namespace" . }} port-forward $POD_NAME 3000 +{{- end }} +{{- end }} + +3. Login with the password from step 1 and the username: {{ .Values.adminUser }} + +{{- if not .Values.persistence.enabled }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the Grafana pod is terminated. 
##### +################################################################################# +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/_helpers.tpl new file mode 100755 index 000000000..76ad78876 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/_helpers.tpl @@ -0,0 +1,145 @@ +# Rancher +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "grafana.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "grafana.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "grafana.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account +*/}} +{{- define "grafana.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "grafana.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "grafana.serviceAccountNameTest" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (print (include "grafana.fullname" .) "-test") .Values.serviceAccount.nameTest }} +{{- else -}} + {{ default "default" .Values.serviceAccount.nameTest }} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "grafana.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "grafana.labels" -}} +helm.sh/chart: {{ include "grafana.chart" . }} +{{ include "grafana.selectorLabels" . 
}} +{{- if or .Chart.AppVersion .Values.image.tag }} +app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.extraLabels }} +{{ toYaml .Values.extraLabels }} +{{- end }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "grafana.selectorLabels" -}} +app.kubernetes.io/name: {{ include "grafana.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "grafana.imageRenderer.labels" -}} +helm.sh/chart: {{ include "grafana.chart" . }} +{{ include "grafana.imageRenderer.selectorLabels" . }} +{{- if or .Chart.AppVersion .Values.image.tag }} +app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels ImageRenderer +*/}} +{{- define "grafana.imageRenderer.selectorLabels" -}} +app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Return the appropriate apiVersion for rbac. +*/}} +{{- define "rbac.apiVersion" -}} +{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/_pod.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/_pod.tpl new file mode 100755 index 000000000..2ba9f115c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/_pod.tpl @@ -0,0 +1,496 @@ +{{- define "grafana.pod" -}} +{{- if .Values.schedulerName }} +schedulerName: "{{ .Values.schedulerName }}" +{{- end }} +serviceAccountName: {{ template "grafana.serviceAccountName" . 
}} +{{- if .Values.securityContext }} +securityContext: +{{ toYaml .Values.securityContext | indent 2 }} +{{- end }} +{{- if .Values.hostAliases }} +hostAliases: +{{ toYaml .Values.hostAliases | indent 2 }} +{{- end }} +{{- if .Values.priorityClassName }} +priorityClassName: {{ .Values.priorityClassName }} +{{- end }} +{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.sidecar.datasources.enabled .Values.sidecar.notifiers.enabled .Values.extraInitContainers) }} +initContainers: +{{- end }} +{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }} + - name: init-chown-data + {{- if .Values.initChownData.image.sha }} + image: "{{ template "system_default_registry" . }}{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}@sha256:{{ .Values.initChownData.image.sha }}" + {{- else }} + image: "{{ template "system_default_registry" . }}{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }} + securityContext: + runAsNonRoot: false + runAsUser: 0 + command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsGroup }}", "/var/lib/grafana"] + resources: +{{ toYaml .Values.initChownData.resources | indent 6 }} + volumeMounts: + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} +{{- end }} +{{- if .Values.dashboards }} + - name: download-dashboards + {{- if .Values.downloadDashboardsImage.sha }} + image: "{{ template "system_default_registry" . }}{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}@sha256:{{ .Values.downloadDashboardsImage.sha }}" + {{- else }} + image: "{{ template "system_default_registry" . 
}}{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} + command: ["/bin/sh"] + args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh /etc/grafana/download_dashboards.sh" ] + resources: +{{ toYaml .Values.downloadDashboards.resources | indent 6 }} + env: +{{- range $key, $value := .Values.downloadDashboards.env }} + - name: "{{ $key }}" + value: "{{ $value }}" +{{- end }} +{{- if .Values.downloadDashboards.envFromSecret }} + envFrom: + - secretRef: + name: {{ tpl .Values.downloadDashboards.envFromSecret . }} +{{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/download_dashboards.sh" + subPath: download_dashboards.sh + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} +{{- end }} +{{- if .Values.sidecar.datasources.enabled }} + - name: {{ template "grafana.name" . }}-sc-datasources + {{- if .Values.sidecar.image.sha }} + image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + {{- if .Values.sidecar.datasources.envFromSecret }} + envFrom: + - secretRef: + name: {{ tpl .Values.sidecar.datasources.envFromSecret . 
}} + {{- end }} + env: + - name: METHOD + value: LIST + - name: LABEL + value: "{{ .Values.sidecar.datasources.label }}" + {{- if .Values.sidecar.datasources.labelValue }} + - name: LABEL_VALUE + value: {{ quote .Values.sidecar.datasources.labelValue }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/datasources" + - name: RESOURCE + value: "both" + {{- if .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- if .Values.sidecar.datasources.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.sidecar.datasources.searchNamespace }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + resources: +{{ toYaml .Values.sidecar.resources | indent 6 }} + volumeMounts: + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} +{{- if .Values.sidecar.notifiers.enabled }} + - name: {{ template "grafana.name" . }}-sc-notifiers + {{- if .Values.sidecar.image.sha }} + image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ template "system_default_registry" . 
}}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + - name: METHOD + value: LIST + - name: LABEL + value: "{{ .Values.sidecar.notifiers.label }}" + - name: FOLDER + value: "/etc/grafana/provisioning/notifiers" + - name: RESOURCE + value: "both" + {{- if .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- if .Values.sidecar.notifiers.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.sidecar.notifiers.searchNamespace }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + resources: +{{ toYaml .Values.sidecar.resources | indent 6 }} + volumeMounts: + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" +{{- end}} +{{- if .Values.extraInitContainers }} +{{ toYaml .Values.extraInitContainers | indent 2 }} +{{- end }} +{{- if .Values.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end}} +{{- end }} +containers: +{{- if .Values.sidecar.dashboards.enabled }} + - name: {{ template "grafana.name" . }}-sc-dashboard + {{- if .Values.sidecar.image.sha }} + image: "{{ template "system_default_registry" . }}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ template "system_default_registry" . 
}}{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + - name: METHOD + value: {{ .Values.sidecar.dashboards.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.dashboards.label }}" + {{- if .Values.sidecar.dashboards.labelValue }} + - name: LABEL_VALUE + value: {{ quote .Values.sidecar.dashboards.labelValue }} + {{- end }} + - name: FOLDER + value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}" + - name: RESOURCE + value: "both" + {{- if .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- if .Values.sidecar.dashboards.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.sidecar.dashboards.searchNamespace }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + {{- if .Values.sidecar.dashboards.folderAnnotation }} + - name: FOLDER_ANNOTATION + value: "{{ .Values.sidecar.dashboards.folderAnnotation }}" + {{- end }} + resources: +{{ toYaml .Values.sidecar.resources | indent 6 }} + volumeMounts: + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} +{{- end}} + - name: {{ .Chart.Name }} + {{- if .Values.image.sha }} + image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}@sha256:{{ .Values.image.sha }}" + {{- else }} + image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.command }} + command: + {{- range .Values.command }} + - {{ . 
}} + {{- end }} + {{- end}} +{{- if .Values.containerSecurityContext }} + securityContext: +{{- toYaml .Values.containerSecurityContext | nindent 6 }} +{{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/grafana.ini" + subPath: grafana.ini + {{- if .Values.ldap.enabled }} + - name: ldap + mountPath: "/etc/grafana/ldap.toml" + subPath: ldap.toml + {{- end }} + {{- range .Values.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} +{{- if .Values.dashboards }} +{{- range $provider, $dashboards := .Values.dashboards }} +{{- range $key, $value := $dashboards }} +{{- if (or (hasKey $value "json") (hasKey $value "file")) }} + - name: dashboards-{{ $provider }} + mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" + subPath: "{{ $key }}.json" +{{- end }} +{{- end }} +{{- end }} +{{- end -}} +{{- if .Values.dashboardsConfigMaps }} +{{- range (keys .Values.dashboardsConfigMaps | sortAlpha) }} + - name: dashboards-{{ . }} + mountPath: "/var/lib/grafana/dashboards/{{ . 
}}" +{{- end }} +{{- end }} +{{- if .Values.datasources }} + - name: config + mountPath: "/etc/grafana/provisioning/datasources/datasources.yaml" + subPath: datasources.yaml +{{- end }} +{{- if .Values.notifiers }} + - name: config + mountPath: "/etc/grafana/provisioning/notifiers/notifiers.yaml" + subPath: notifiers.yaml +{{- end }} +{{- if .Values.dashboardProviders }} + - name: config + mountPath: "/etc/grafana/provisioning/dashboards/dashboardproviders.yaml" + subPath: dashboardproviders.yaml +{{- end }} +{{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} +{{ if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" + subPath: provider.yaml +{{- end}} +{{- end}} +{{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} +{{- if .Values.sidecar.notifiers.enabled }} + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" +{{- end}} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + subPath: {{ .subPath | default "" }} + {{- end }} + {{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + {{- end }} + ports: + - name: {{ .Values.service.portName }} + containerPort: {{ .Values.service.targetPort }} + protocol: TCP + - name: {{ .Values.podPortName }} + containerPort: 3000 + protocol: TCP + env: + {{- if not .Values.env.GF_SECURITY_ADMIN_USER }} + - name: GF_SECURITY_ADMIN_USER + valueFrom: + secretKeyRef: + name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) 
}} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) }} + - name: GF_SECURITY_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if .Values.plugins }} + - name: GF_INSTALL_PLUGINS + valueFrom: + configMapKeyRef: + name: {{ template "grafana.fullname" . }} + key: plugins + {{- end }} + {{- if .Values.smtp.existingSecret }} + - name: GF_SMTP_USER + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.userKey | default "user" }} + - name: GF_SMTP_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.passwordKey | default "password" }} + {{- end }} + {{ if .Values.imageRenderer.enabled }} + - name: GF_RENDERING_SERVER_URL + value: http://{{ template "grafana.fullname" . }}-image-renderer.{{ template "grafana.namespace" . }}:{{ .Values.imageRenderer.service.port }}/render + - name: GF_RENDERING_CALLBACK_URL + value: http://{{ template "grafana.fullname" . }}.{{ template "grafana.namespace" . }}:{{ .Values.service.port }}/{{ .Values.imageRenderer.grafanaSubPath }} + {{ end }} + {{- range $key, $value := .Values.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: +{{ toYaml $value | indent 10 }} + {{- end }} +{{- range $key, $value := .Values.env }} + - name: "{{ tpl $key $ }}" + value: "{{ tpl (print $value) $ }}" +{{- end }} + {{- if .Values.envFromSecret }} + envFrom: + - secretRef: + name: {{ tpl .Values.envFromSecret . }} + {{- end }} + {{- if .Values.envRenderSecret }} + envFrom: + - secretRef: + name: {{ template "grafana.fullname" . 
}}-env + {{- end }} + livenessProbe: +{{ toYaml .Values.livenessProbe | indent 6 }} + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 6 }} + resources: +{{ toYaml .Values.resources | indent 6 }} +{{- with .Values.extraContainers }} +{{ tpl . $ | indent 2 }} +{{- end }} +nodeSelector: {{ include "linux-node-selector" . | nindent 2 }} +{{- if .Values.nodeSelector }} +{{ toYaml .Values.nodeSelector | indent 2 }} +{{- end }} +{{- with .Values.affinity }} +affinity: +{{ toYaml . | indent 2 }} +{{- end }} +tolerations: {{ include "linux-node-tolerations" . | nindent 2 }} +{{- if .Values.tolerations }} +{{ toYaml .Values.tolerations | indent 2 }} +{{- end }} +volumes: + - name: config + configMap: + name: {{ template "grafana.fullname" . }} +{{- range .Values.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} +{{- end }} + {{- if .Values.dashboards }} + {{- range (keys .Values.dashboards | sortAlpha) }} + - name: dashboards-{{ . }} + configMap: + name: {{ template "grafana.fullname" $ }}-dashboards-{{ . }} + {{- end }} + {{- end }} + {{- if .Values.dashboardsConfigMaps }} + {{ $root := . }} + {{- range $provider, $name := .Values.dashboardsConfigMaps }} + - name: dashboards-{{ $provider }} + configMap: + name: {{ tpl $name $root }} + {{- end }} + {{- end }} + {{- if .Values.ldap.enabled }} + - name: ldap + secret: + {{- if .Values.ldap.existingSecret }} + secretName: {{ .Values.ldap.existingSecret }} + {{- else }} + secretName: {{ template "grafana.fullname" . }} + {{- end }} + items: + - key: ldap-toml + path: ldap.toml + {{- end }} +{{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }} + - name: storage + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "grafana.fullname" .) 
}} +{{- else if and .Values.persistence.enabled (eq .Values.persistence.type "statefulset") }} +# nothing +{{- else }} + - name: storage +{{- if .Values.persistence.inMemory.enabled }} + emptyDir: + medium: Memory +{{- if .Values.persistence.inMemory.sizeLimit }} + sizeLimit: {{ .Values.persistence.inMemory.sizeLimit }} +{{- end -}} +{{- else }} + emptyDir: {} +{{- end -}} +{{- end -}} +{{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + emptyDir: {} +{{- if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + configMap: + name: {{ template "grafana.fullname" . }}-config-dashboards +{{- end }} +{{- end }} +{{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + emptyDir: {} +{{- end -}} +{{- if .Values.sidecar.notifiers.enabled }} + - name: sc-notifiers-volume + emptyDir: {} +{{- end -}} +{{- range .Values.extraSecretMounts }} +{{- if .secretName }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + defaultMode: {{ .defaultMode }} +{{- else if .projected }} + - name: {{ .name }} + projected: {{- toYaml .projected | nindent 6 }} +{{- else if .csi }} + - name: {{ .name }} + csi: {{- toYaml .csi | nindent 6 }} +{{- end }} +{{- end }} +{{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + persistentVolumeClaim: + claimName: {{ .existingClaim }} +{{- end }} +{{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + emptyDir: {} +{{- end -}} +{{- if .Values.extraContainerVolumes }} +{{ toYaml .Values.extraContainerVolumes | indent 2 }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/clusterrole.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/clusterrole.yaml new file mode 100755 index 000000000..f09e06563 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/clusterrole.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.rbac.create 
(not .Values.rbac.namespaced) (not .Values.rbac.useExistingRole) }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + name: {{ template "grafana.fullname" . }}-clusterrole +{{- if or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraClusterRoleRules) }} +rules: +{{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled }} +- apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +{{- end}} +{{- with .Values.rbac.extraClusterRoleRules }} +{{ toYaml . | indent 0 }} +{{- end}} +{{- else }} +rules: [] +{{- end}} +{{- end}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/clusterrolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/clusterrolebinding.yaml new file mode 100755 index 000000000..4accbfac0 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/clusterrolebinding.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "grafana.fullname" . }}-clusterrolebinding + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: {{ template "grafana.serviceAccountName" . }} + namespace: {{ template "grafana.namespace" . }} +roleRef: + kind: ClusterRole +{{- if (not .Values.rbac.useExistingRole) }} + name: {{ template "grafana.fullname" . 
}}-clusterrole +{{- else }} + name: {{ .Values.rbac.useExistingRole }} +{{- end }} + apiGroup: rbac.authorization.k8s.io +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/configmap-dashboard-provider.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/configmap-dashboard-provider.yaml new file mode 100755 index 000000000..65d73858e --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/configmap-dashboard-provider.yaml @@ -0,0 +1,29 @@ +{{- if .Values.sidecar.dashboards.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + name: {{ template "grafana.fullname" . }}-config-dashboards + namespace: {{ template "grafana.namespace" . }} +data: + provider.yaml: |- + apiVersion: 1 + providers: + - name: '{{ .Values.sidecar.dashboards.provider.name }}' + orgId: {{ .Values.sidecar.dashboards.provider.orgid }} + {{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} + folder: '{{ .Values.sidecar.dashboards.provider.folder }}' + {{- end}} + type: {{ .Values.sidecar.dashboards.provider.type }} + disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }} + allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }} + updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }} + options: + foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} + path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . 
}}{{- end }} +{{- end}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/configmap.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/configmap.yaml new file mode 100755 index 000000000..de32b7ab2 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/configmap.yaml @@ -0,0 +1,80 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +data: +{{- if .Values.plugins }} + plugins: {{ join "," .Values.plugins }} +{{- end }} + grafana.ini: | +{{- range $key, $value := index .Values "grafana.ini" }} + [{{ $key }}] + {{- range $elem, $elemVal := $value }} + {{- if kindIs "invalid" $elemVal }} + {{ $elem }} = + {{- else }} + {{ $elem }} = {{ tpl (toYaml $elemVal) $ }} + {{- end }} + {{- end }} +{{- end }} + +{{- if .Values.datasources }} +{{ $root := . 
}} + {{- range $key, $value := .Values.datasources }} + {{ $key }}: | +{{ tpl (toYaml $value | indent 4) $root }} + {{- end -}} +{{- end -}} + +{{- if .Values.notifiers }} + {{- range $key, $value := .Values.notifiers }} + {{ $key }}: | +{{ toYaml $value | indent 4 }} + {{- end -}} +{{- end -}} + +{{- if .Values.dashboardProviders }} + {{- range $key, $value := .Values.dashboardProviders }} + {{ $key }}: | +{{ toYaml $value | indent 4 }} + {{- end -}} +{{- end -}} + +{{- if .Values.dashboards }} + download_dashboards.sh: | + #!/usr/bin/env sh + set -euf + {{- if .Values.dashboardProviders }} + {{- range $key, $value := .Values.dashboardProviders }} + {{- range $value.providers }} + mkdir -p {{ .options.path }} + {{- end }} + {{- end }} + {{- end }} + + {{- range $provider, $dashboards := .Values.dashboards }} + {{- range $key, $value := $dashboards }} + {{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }} + curl -skf \ + --connect-timeout 60 \ + --max-time 60 \ + {{- if not $value.b64content }} + -H "Accept: application/json" \ + {{- if $value.token }} + -H "Authorization: token {{ $value.token }}" \ + {{- end }} + -H "Content-Type: application/json;charset=UTF-8" \ + {{ end }} + {{- if $value.url -}}"{{ $value.url }}"{{- else -}}"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download"{{- end -}}{{ if $value.datasource }} | sed '/-- .* --/! 
s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g'{{ end }}{{- if $value.b64content -}} | base64 -d {{- end -}} \ + > "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" + {{- end -}} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/dashboards-json-configmap.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/dashboards-json-configmap.yaml new file mode 100755 index 000000000..59e0be641 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/dashboards-json-configmap.yaml @@ -0,0 +1,35 @@ +{{- if .Values.dashboards }} +{{ $files := .Files }} +{{- range $provider, $dashboards := .Values.dashboards }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "grafana.fullname" $ }}-dashboards-{{ $provider }} + namespace: {{ template "grafana.namespace" $ }} + labels: + {{- include "grafana.labels" $ | nindent 4 }} + dashboard-provider: {{ $provider }} +{{- if $dashboards }} +data: +{{- $dashboardFound := false }} +{{- range $key, $value := $dashboards }} +{{- if (or (hasKey $value "json") (hasKey $value "file")) }} +{{- $dashboardFound = true }} +{{ print $key | indent 2 }}.json: +{{- if hasKey $value "json" }} + |- +{{ $value.json | indent 6 }} +{{- end }} +{{- if hasKey $value "file" }} +{{ toYaml ( $files.Get $value.file ) | indent 4}} +{{- end }} +{{- end }} +{{- end }} +{{- if not $dashboardFound }} + {} +{{- end }} +{{- end }} +--- +{{- end }} + +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/deployment.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/deployment.yaml new file mode 100755 index 000000000..4d77794cd --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/deployment.yaml @@ -0,0 +1,48 @@ +{{ if (or (not .Values.persistence.enabled) 
(eq .Values.persistence.type "pvc")) }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- if .Values.labels }} +{{ toYaml .Values.labels | indent 4 }} +{{- end }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.replicas }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} +{{- with .Values.deploymentStrategy }} + strategy: +{{ toYaml . | trim | indent 4 }} +{{- end }} + template: + metadata: + labels: + {{- include "grafana.selectorLabels" . | nindent 8 }} +{{- with .Values.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} +{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} +{{- end }} +{{- if .Values.envRenderSecret }} + checksum/secret-env: {{ include (print $.Template.BasePath "/secret-env.yaml") . | sha256sum }} +{{- end }} +{{- with .Values.podAnnotations }} +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- include "grafana.pod" . 
| nindent 6 }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/headless-service.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/headless-service.yaml new file mode 100755 index 000000000..2fa816e04 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/headless-service.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset")}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "grafana.fullname" . }}-headless + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + clusterIP: None + selector: + {{- include "grafana.selectorLabels" . | nindent 4 }} + type: ClusterIP +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/image-renderer-deployment.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/image-renderer-deployment.yaml new file mode 100755 index 000000000..d17b9dfed --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/image-renderer-deployment.yaml @@ -0,0 +1,117 @@ +{{ if .Values.imageRenderer.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "grafana.fullname" . }}-image-renderer + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} +{{- if .Values.imageRenderer.labels }} +{{ toYaml .Values.imageRenderer.labels | indent 4 }} +{{- end }} +{{- with .Values.imageRenderer.annotations }} + annotations: +{{ toYaml . 
| indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.imageRenderer.replicas }} + revisionHistoryLimit: {{ .Values.imageRenderer.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} +{{- with .Values.imageRenderer.deploymentStrategy }} + strategy: +{{ toYaml . | trim | indent 4 }} +{{- end }} + template: + metadata: + labels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 8 }} +{{- with .Values.imageRenderer.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} +{{- with .Values.imageRenderer.podAnnotations }} +{{ toYaml . | indent 8 }} +{{- end }} + spec: + + {{- if .Values.imageRenderer.schedulerName }} + schedulerName: "{{ .Values.imageRenderer.schedulerName }}" + {{- end }} + {{- if .Values.imageRenderer.serviceAccountName }} + serviceAccountName: "{{ .Values.imageRenderer.serviceAccountName }}" + {{- else }} + serviceAccountName: {{ template "grafana.serviceAccountName" . }} + {{- end }} + {{- if .Values.imageRenderer.securityContext }} + securityContext: + {{ toYaml .Values.imageRenderer.securityContext | indent 2 }} + {{- end }} + {{- if .Values.imageRenderer.hostAliases }} + hostAliases: + {{ toYaml .Values.imageRenderer.hostAliases | indent 2 }} + {{- end }} + {{- if .Values.imageRenderer.priorityClassName }} + priorityClassName: {{ .Values.imageRenderer.priorityClassName }} + {{- end }} + {{- if .Values.imageRenderer.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.imageRenderer.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ .Chart.Name }}-image-renderer + {{- if .Values.imageRenderer.image.sha }} + image: "{{ template "system_default_registry" . 
}}{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}@sha256:{{ .Values.imageRenderer.image.sha }}" + {{- else }} + image: "{{ template "system_default_registry" . }}{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.imageRenderer.image.pullPolicy }} + {{- if .Values.imageRenderer.command }} + command: + {{- range .Values.imageRenderer.command }} + - {{ . }} + {{- end }} + {{- end}} + ports: + - name: {{ .Values.imageRenderer.service.portName }} + containerPort: {{ .Values.imageRenderer.service.port }} + protocol: TCP + env: + - name: HTTP_PORT + value: {{ .Values.imageRenderer.service.port | quote }} + {{- range $key, $value := .Values.imageRenderer.env }} + - name: {{ $key | quote }} + value: {{ $value | quote }} + {{- end }} + securityContext: + capabilities: + drop: ['all'] + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /tmp + name: image-renderer-tmpfs + {{- with .Values.imageRenderer.resources }} + resources: +{{ toYaml . | indent 12 }} + {{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} + {{- if .Values.imageRenderer.nodeSelector }} +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.imageRenderer.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} + {{- if .Values.imageRenderer.tolerations }} +{{ toYaml . 
| indent 8 }} + {{- end }} + volumes: + - name: image-renderer-tmpfs + emptyDir: {} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/image-renderer-network-policy.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/image-renderer-network-policy.yaml new file mode 100755 index 000000000..f8ca73aab --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/image-renderer-network-policy.yaml @@ -0,0 +1,76 @@ +{{- if and (.Values.imageRenderer.enabled) (.Values.imageRenderer.networkPolicy.limitIngress) }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "grafana.fullname" . }}-image-renderer-ingress + namespace: {{ template "grafana.namespace" . }} + annotations: + comment: Limit image-renderer ingress traffic from grafana +spec: + podSelector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + {{- if .Values.imageRenderer.podLabels }} + {{ toYaml .Values.imageRenderer.podLabels | nindent 6 }} + {{- end }} + + policyTypes: + - Ingress + ingress: + - ports: + - port: {{ .Values.imageRenderer.service.port }} + protocol: TCP + from: + - namespaceSelector: + matchLabels: + name: {{ template "grafana.namespace" . }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 14 }} + {{- if .Values.podLabels }} + {{ toYaml .Values.podLabels | nindent 14 }} + {{- end }} +{{ end }} + +{{- if and (.Values.imageRenderer.enabled) (.Values.imageRenderer.networkPolicy.limitEgress) }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "grafana.fullname" . }}-image-renderer-egress + namespace: {{ template "grafana.namespace" . }} + annotations: + comment: Limit image-renderer egress traffic to grafana +spec: + podSelector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . 
| nindent 6 }} + {{- if .Values.imageRenderer.podLabels }} + {{ toYaml .Values.imageRenderer.podLabels | nindent 6 }} + {{- end }} + + policyTypes: + - Egress + egress: + # allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # talk only to grafana + - ports: + - port: {{ .Values.service.port }} + protocol: TCP + to: + - namespaceSelector: + matchLabels: + name: {{ template "grafana.namespace" . }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 14 }} + {{- if .Values.podLabels }} + {{ toYaml .Values.podLabels | nindent 14 }} + {{- end }} +{{ end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/image-renderer-service.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/image-renderer-service.yaml new file mode 100755 index 000000000..f5d3eb02f --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/image-renderer-service.yaml @@ -0,0 +1,28 @@ +{{ if .Values.imageRenderer.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "grafana.fullname" . }}-image-renderer + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} +{{- if .Values.imageRenderer.service.labels }} +{{ toYaml .Values.imageRenderer.service.labels | indent 4 }} +{{- end }} +{{- with .Values.imageRenderer.service.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + type: ClusterIP + {{- if .Values.imageRenderer.service.clusterIP }} + clusterIP: {{ .Values.imageRenderer.service.clusterIP }} + {{end}} + ports: + - name: {{ .Values.imageRenderer.service.portName }} + port: {{ .Values.imageRenderer.service.port }} + protocol: TCP + targetPort: {{ .Values.imageRenderer.service.targetPort }} + selector: + {{- include "grafana.imageRenderer.selectorLabels" . 
| nindent 4 }} +{{ end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/ingress.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/ingress.yaml new file mode 100755 index 000000000..44ebfc950 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/ingress.yaml @@ -0,0 +1,80 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "grafana.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +{{- $ingressPathType := .Values.ingress.pathType -}} +{{- $extraPaths := .Values.ingress.extraPaths -}} +{{- $newAPI := .Capabilities.APIVersions.Has "networking.k8s.io/v1/Ingress" -}} +{{- if $newAPI -}} +apiVersion: networking.k8s.io/v1 +{{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1/Ingress" }} +apiVersion: networking.k8s.io/v1beta1 +{{- else }} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- if .Values.ingress.labels }} +{{ toYaml .Values.ingress.labels | indent 4 }} +{{- end }} + {{- if .Values.ingress.annotations }} + annotations: + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} +spec: + {{- if .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} + {{- end -}} +{{- if .Values.ingress.tls }} + tls: +{{ tpl (toYaml .Values.ingress.tls) $ | indent 4 }} +{{- end }} + rules: + {{- if .Values.ingress.hosts }} + {{- range .Values.ingress.hosts }} + - host: {{ tpl . 
$}} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: {{ $ingressPath }} + {{- if $newAPI }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $newAPI }} + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end }} + {{- else }} + - http: + paths: + - backend: + {{- if $newAPI }} + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + pathType: {{ $ingressPathType }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- if $ingressPath }} + path: {{ $ingressPath }} + {{- end }} + {{- end -}} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/nginx-config.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/nginx-config.yaml new file mode 100755 index 000000000..f847c51ce --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/nginx-config.yaml @@ -0,0 +1,75 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: grafana-nginx-proxy-config + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . 
| nindent 4 }} +data: + nginx.conf: |- + worker_processes auto; + error_log /dev/stdout warn; + pid /var/cache/nginx/nginx.pid; + + events { + worker_connections 1024; + } + + http { + include /etc/nginx/mime.types; + log_format main '[$time_local - $status] $remote_addr - $remote_user $request ($http_referer)'; + + proxy_connect_timeout 10; + proxy_read_timeout 180; + proxy_send_timeout 5; + proxy_buffering off; + proxy_cache_path /var/cache/nginx/cache levels=1:2 keys_zone=my_zone:100m inactive=1d max_size=10g; + + server { + listen 8080; + access_log off; + + gzip on; + gzip_min_length 1k; + gzip_comp_level 2; + gzip_types text/plain application/javascript application/x-javascript text/css application/xml text/javascript image/jpeg image/gif image/png; + gzip_vary on; + gzip_disable "MSIE [1-6]\."; + + proxy_set_header Host $host; + + location /api/dashboards { + proxy_pass http://localhost:3000; + } + + location /api/search { + proxy_pass http://localhost:3000; + + sub_filter_types application/json; + sub_filter_once off; + sub_filter '"url":"/d' '"url":"d'; + } + + location / { + proxy_cache my_zone; + proxy_cache_valid 200 302 1d; + proxy_cache_valid 301 30d; + proxy_cache_valid any 5m; + proxy_cache_bypass $http_cache_control; + add_header X-Proxy-Cache $upstream_cache_status; + add_header Cache-Control "public"; + + proxy_pass http://localhost:3000/; + + sub_filter_types text/html; + sub_filter_once off; + sub_filter '"appSubUrl":""' '"appSubUrl":"."'; + sub_filter '"url":"/' '"url":"./'; + sub_filter ':"/avatar/' ':"avatar/'; + + if ($request_filename ~ .*\.(?:js|css|jpg|jpeg|gif|png|ico|cur|gz|svg|svgz|mp4|ogg|ogv|webm)$) { + expires 90d; + } + } + } + } diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/poddisruptionbudget.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/poddisruptionbudget.yaml new file mode 100755 index 000000000..61813a436 --- /dev/null +++ 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/poddisruptionbudget.yaml @@ -0,0 +1,22 @@ +{{- if .Values.podDisruptionBudget }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- if .Values.labels }} +{{ toYaml .Values.labels | indent 4 }} +{{- end }} +spec: +{{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} +{{- end }} +{{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} +{{- end }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/podsecuritypolicy.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/podsecuritypolicy.yaml new file mode 100755 index 000000000..19da50791 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/podsecuritypolicy.yaml @@ -0,0 +1,49 @@ +{{- if .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . 
| nindent 4 }} +{{- if .Values.rbac.pspAnnotations }} + annotations: {{ toYaml .Values.rbac.pspAnnotations | nindent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + # Default set from Docker, without DAC_OVERRIDE or CHOWN + - FOWNER + - FSETID + - KILL + - SETGID + - SETUID + - SETPCAP + - NET_BIND_SERVICE + - NET_RAW + - SYS_CHROOT + - MKNOD + - AUDIT_WRITE + - SETFCAP + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'csi' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'RunAsAny' + fsGroup: + rule: 'RunAsAny' + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/pvc.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/pvc.yaml new file mode 100755 index 000000000..8d93f5c23 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/pvc.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.persistence.annotations }} + annotations: +{{ toYaml . | indent 4 }} + {{- end }} + {{- with .Values.persistence.finalizers }} + finalizers: +{{ toYaml . | indent 4 }} + {{- end }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- if .Values.persistence.storageClassName }} + storageClassName: {{ .Values.persistence.storageClassName }} + {{- end -}} + {{- with .Values.persistence.selectorLabels }} + selector: + matchLabels: +{{ toYaml . | indent 6 }} + {{- end }} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/role.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/role.yaml new file mode 100755 index 000000000..54c3fb0b2 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/role.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.rbac.create (not .Values.rbac.useExistingRole) -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraRoleRules))) }} +rules: +{{- if .Values.rbac.pspEnabled }} +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "grafana.fullname" . }}] +{{- end }} +{{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled) }} +- apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +{{- end }} +{{- with .Values.rbac.extraRoleRules }} +{{ toYaml . 
| indent 0 }} +{{- end}} +{{- else }} +rules: [] +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/rolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/rolebinding.yaml new file mode 100755 index 000000000..34f1ad6f8 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/rolebinding.yaml @@ -0,0 +1,25 @@ +{{- if .Values.rbac.create -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role +{{- if (not .Values.rbac.useExistingRole) }} + name: {{ template "grafana.fullname" . }} +{{- else }} + name: {{ .Values.rbac.useExistingRole }} +{{- end }} +subjects: +- kind: ServiceAccount + name: {{ template "grafana.serviceAccountName" . }} + namespace: {{ template "grafana.namespace" . }} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/secret-env.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/secret-env.yaml new file mode 100755 index 000000000..5c09313e6 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/secret-env.yaml @@ -0,0 +1,14 @@ +{{- if .Values.envRenderSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "grafana.fullname" . }}-env + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . 
| nindent 4 }} +type: Opaque +data: +{{- range $key, $val := .Values.envRenderSecret }} + {{ $key }}: {{ $val | b64enc | quote }} +{{- end -}} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/secret.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/secret.yaml new file mode 100755 index 000000000..4fdd817da --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/secret.yaml @@ -0,0 +1,22 @@ +{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +type: Opaque +data: + {{- if and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }} + admin-user: {{ .Values.adminUser | b64enc | quote }} + {{- if .Values.adminPassword }} + admin-password: {{ .Values.adminPassword | b64enc | quote }} + {{- else }} + admin-password: {{ randAlphaNum 40 | b64enc | quote }} + {{- end }} + {{- end }} + {{- if not .Values.ldap.existingSecret }} + ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/service.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/service.yaml new file mode 100755 index 000000000..276456698 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/service.yaml @@ -0,0 +1,50 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . 
}} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- if .Values.service.labels }} +{{ toYaml .Values.service.labels | indent 4 }} +{{- end }} +{{- with .Values.service.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: +{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} + type: ClusterIP + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{end}} +{{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }} + {{- end -}} +{{- else }} + type: {{ .Values.service.type }} +{{- end }} +{{- if .Values.service.externalIPs }} + externalIPs: +{{ toYaml .Values.service.externalIPs | indent 4 }} +{{- end }} + ports: + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + protocol: TCP + targetPort: {{ .Values.service.targetPort }} +{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{.Values.service.nodePort}} +{{ end }} + {{- if .Values.extraExposePorts }} + {{- tpl (toYaml .Values.extraExposePorts) . | indent 4 }} + {{- end }} + selector: + {{- include "grafana.selectorLabels" . | nindent 4 }} + diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/serviceaccount.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/serviceaccount.yaml new file mode 100755 index 000000000..7576eeef0 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "grafana.labels" . 
| nindent 4 }} +{{- with .Values.serviceAccount.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + name: {{ template "grafana.serviceAccountName" . }} + namespace: {{ template "grafana.namespace" . }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/servicemonitor.yaml new file mode 100755 index 000000000..23288523f --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/servicemonitor.yaml @@ -0,0 +1,40 @@ +{{- if .Values.serviceMonitor.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "grafana.fullname" . }} + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ .Values.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- if .Values.serviceMonitor.labels }} + {{- toYaml .Values.serviceMonitor.labels | nindent 4 }} + {{- end }} +spec: + endpoints: + - interval: {{ .Values.serviceMonitor.interval }} + {{- if .Values.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }} + {{- end }} + honorLabels: true + port: {{ .Values.service.portName }} + path: {{ .Values.serviceMonitor.path }} + scheme: {{ .Values.serviceMonitor.scheme }} + {{- if .Values.serviceMonitor.tlsConfig }} + tlsConfig: + {{- toYaml .Values.serviceMonitor.tlsConfig | nindent 6 }} + {{- end }} + {{- if .Values.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + jobLabel: "{{ .Release.Name }}" + selector: + matchLabels: + {{- include "grafana.selectorLabels" . 
| nindent 8 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/statefulset.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/statefulset.yaml new file mode 100755 index 000000000..b2b4616f3 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/statefulset.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset")}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + serviceName: {{ template "grafana.fullname" . }}-headless + template: + metadata: + labels: + {{- include "grafana.selectorLabels" . | nindent 8 }} +{{- with .Values.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} + {{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} +{{- end }} +{{- with .Values.podAnnotations }} +{{ toYaml . 
| indent 8 }} +{{- end }} + spec: + {{- include "grafana.pod" . | nindent 6 }} + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: {{ .Values.persistence.accessModes }} + storageClassName: {{ .Values.persistence.storageClassName }} + resources: + requests: + storage: {{ .Values.persistence.size }} + {{- with .Values.persistence.selectorLabels }} + selector: + matchLabels: +{{ toYaml . | indent 10 }} + {{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-configmap.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-configmap.yaml new file mode 100755 index 000000000..ff53aaf1b --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-configmap.yaml @@ -0,0 +1,17 @@ +{{- if .Values.testFramework.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "grafana.fullname" . }}-test + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +data: + run.sh: |- + @test "Test Health" { + url="http://{{ template "grafana.fullname" . }}/api/health" + + code=$(wget --server-response --spider --timeout 10 --tries 1 ${url} 2>&1 | awk '/^ HTTP/{print $2}') + [ "$code" == "200" ] + } +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-podsecuritypolicy.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-podsecuritypolicy.yaml new file mode 100755 index 000000000..1acd65128 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-podsecuritypolicy.yaml @@ -0,0 +1,30 @@ +{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "grafana.fullname" . 
}}-test + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +spec: + allowPrivilegeEscalation: true + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + fsGroup: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + volumes: + - configMap + - downwardAPI + - emptyDir + - projected + - csi + - secret +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-role.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-role.yaml new file mode 100755 index 000000000..6b10677ae --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-role.yaml @@ -0,0 +1,14 @@ +{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "grafana.fullname" . }}-test + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "grafana.fullname" . }}-test] +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-rolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-rolebinding.yaml new file mode 100755 index 000000000..58fa5e78b --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-rolebinding.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.testFramework.enabled .Values.rbac.pspEnabled -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "grafana.fullname" . }}-test + namespace: {{ template "grafana.namespace" . 
}} + labels: + {{- include "grafana.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "grafana.fullname" . }}-test +subjects: +- kind: ServiceAccount + name: {{ template "grafana.serviceAccountNameTest" . }} + namespace: {{ template "grafana.namespace" . }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-serviceaccount.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-serviceaccount.yaml new file mode 100755 index 000000000..5c3350733 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test-serviceaccount.yaml @@ -0,0 +1,9 @@ +{{- if and .Values.testFramework.enabled .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + name: {{ template "grafana.serviceAccountNameTest" . }} + namespace: {{ template "grafana.namespace" . }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test.yaml new file mode 100755 index 000000000..cdc86e5f2 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/templates/tests/test.yaml @@ -0,0 +1,48 @@ +{{- if .Values.testFramework.enabled }} +apiVersion: v1 +kind: Pod +metadata: + name: {{ template "grafana.fullname" . }}-test + labels: + {{- include "grafana.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test-success + namespace: {{ template "grafana.namespace" . }} +spec: + serviceAccountName: {{ template "grafana.serviceAccountNameTest" . 
}} + {{- if .Values.testFramework.securityContext }} + securityContext: {{ toYaml .Values.testFramework.securityContext | nindent 4 }} + {{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 4 }} + {{- end }} + {{- with .Values.affinity }} + affinity: +{{ toYaml . | indent 4 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: +{{ toYaml . | indent 4 }} + {{- end }} + containers: + - name: {{ .Release.Name }}-test + image: "{{ .Values.testFramework.image}}:{{ .Values.testFramework.tag }}" + imagePullPolicy: "{{ .Values.testFramework.imagePullPolicy}}" + command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"] + volumeMounts: + - mountPath: /tests + name: tests + readOnly: true + volumes: + - name: tests + configMap: + name: {{ template "grafana.fullname" . }}-test + restartPolicy: Never +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/values.yaml new file mode 100755 index 000000000..9491c1a1f --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/grafana/values.yaml @@ -0,0 +1,732 @@ +global: + cattle: + systemDefaultRegistry: "" + +autoscaling: + enabled: false +rbac: + create: true + ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true) + # useExistingRole: name-of-some-(cluster)role + pspEnabled: true + pspAnnotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + # 
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + namespaced: false + extraRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] + extraClusterRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] +serviceAccount: + create: true + name: + nameTest: +# annotations: +# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here + +replicas: 1 + +## See `kubectl explain poddisruptionbudget.spec` for more +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} +# minAvailable: 1 +# maxUnavailable: 1 + +## See `kubectl explain deployment.spec.strategy` for more +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy +deploymentStrategy: + type: RollingUpdate + +readinessProbe: + httpGet: + path: /api/health + port: 3000 + +livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 60 + timeoutSeconds: 30 + failureThreshold: 10 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: "default-scheduler" + +image: + repository: rancher/mirrored-grafana-grafana + tag: 7.4.5 + sha: "" + pullPolicy: IfNotPresent + + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +testFramework: + enabled: true + image: "rancher/mirrored-bats-bats" + tag: "v1.1.0" + imagePullPolicy: IfNotPresent + securityContext: + runAsNonRoot: true + runAsUser: 1000 + +securityContext: + runAsNonRoot: true + runAsUser: 472 + runAsGroup: 472 + fsGroup: 472 + +containerSecurityContext: + {} + +extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /etc/grafana/ssl/ + # subPath: certificates.crt # (optional) + # configMap: certs-configmap + # readOnly: true + + +extraEmptyDirMounts: [] + # - name: provisioning-notifiers + # mountPath: /etc/grafana/provisioning/notifiers + + +# Apply extra labels to common labels. +extraLabels: {} + +## Assign a PriorityClassName to pods if set +# priorityClassName: + +downloadDashboardsImage: + repository: rancher/mirrored-curlimages-curl + tag: 7.73.0 + sha: "" + pullPolicy: IfNotPresent + +downloadDashboards: + env: {} + envFromSecret: "" + resources: {} + +## Pod Annotations +# podAnnotations: {} + +## Pod Labels +# podLabels: {} + +podPortName: grafana + +## Deployment annotations +# annotations: {} + +## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. 
+## ref: http://kubernetes.io/docs/user-guide/services/ +## +service: + type: ClusterIP + port: 80 + targetPort: 3000 + # targetPort: 4181 To be used with a proxy extraContainer + annotations: {} + labels: {} + portName: service + +serviceMonitor: + ## If true, a ServiceMonitor CRD is created for a prometheus operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + path: /metrics + # namespace: monitoring (defaults to use the namespace this chart is deployed to) + labels: {} + interval: 1m + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + relabelings: [] + +extraExposePorts: [] + # - name: keycloak + # port: 8080 + # targetPort: 8080 + # type: ClusterIP + +# overrides pod.spec.hostAliases in the grafana deployment's pods +hostAliases: [] + # - ip: "1.2.3.4" + # hostnames: + # - "my.host.com" + +ingress: + enabled: false + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + # Values can be templated + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: / + + # pathType is only for k8s > 1.19 + pathType: Prefix + + hosts: + - chart-example.local + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + ## Or for k8s > 1.19 + # - path: /* + # pathType: Prefix + # backend: + # service: + # name: ssl-redirect + # port: + # name: service + + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi + +## Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +# +nodeSelector: {} + +## Tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Affinity for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +extraInitContainers: [] + +## Enable an Specify container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod +extraContainers: | +# - name: proxy +# image: quay.io/gambol99/keycloak-proxy:latest +# args: +# - -provider=github +# - -client-id= +# - -client-secret= +# - -github-org= +# - -email-domain=* +# - -cookie-secret= +# - -http-address=http://0.0.0.0:4181 +# - -upstream-url=http://127.0.0.1:3000 +# ports: +# - name: proxy-web +# containerPort: 4181 + +## Volumes that can be used in init containers that will not be mounted to deployment pods +extraContainerVolumes: [] +# - name: volume-from-secret +# secret: +# secretName: secret-to-mount +# - name: empty-dir-volume +# emptyDir: {} + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + type: pvc + enabled: false + # storageClassName: default + accessModes: + - ReadWriteOnce + size: 10Gi + # annotations: {} + finalizers: + - kubernetes.io/pvc-protection + # selectorLabels: {} + # subPath: "" + # existingClaim: + + ## If persistence is not enabled, this 
allows to mount the + ## local storage in-memory to improve performance + ## + inMemory: + enabled: false + ## The maximum usage on memory medium EmptyDir would be + ## the minimum value between the SizeLimit specified + ## here and the sum of memory limits of all containers in a pod + ## + # sizeLimit: 300Mi + +initChownData: + ## If false, data ownership will not be reset at startup + ## This allows the prometheus-server to be run with an arbitrary user + ## + enabled: true + + ## initChownData container image + ## + image: + repository: rancher/mirrored-library-busybox + tag: "1.31.1" + sha: "" + pullPolicy: IfNotPresent + + ## initChownData resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + +# Administrator credentials when not using an existing secret (see below) +adminUser: admin +# adminPassword: strongpassword + +# Use an existing secret for the admin user. +admin: + existingSecret: "" + userKey: admin-user + passwordKey: admin-password + +## Define command to be executed at startup by grafana container +## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) +## Default is "run.sh" as defined in grafana's Dockerfile +# command: +# - "sh" +# - "/run.sh" + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Extra environment variables that will be pass onto deployment pods +## +## to provide grafana with access to CloudWatch on AWS EKS: +## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later) +## 2. 
edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the +## same oidc eks provider as noted before (same as the existing line) +## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name +## +## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana", +## +## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess +## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name) +## +## env: +## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here +## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token +## AWS_REGION: us-east-1 +## +## 5. uncomment the EKS section in extraSecretMounts: below +## 6. uncomment the annotation section in the serviceAccount: above +## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn + +env: {} + +## "valueFrom" environment variable references that will be added to deployment pods +## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core +## Renders in container spec as: +## env: +## ... +## - name: +## valueFrom: +## +envValueFrom: {} + +## The name of a secret in the same kubernetes namespace which contain values to be added to the environment +## This can be useful for auth tokens, etc. Value is templated. +envFromSecret: "" + +## Sensible environment variables that will be rendered as new secret object +## This can be useful for auth tokens, etc +envRenderSecret: {} + +## Additional grafana server secret mounts +# Defines additional mounts with secrets. Secrets must be manually created in the namespace. 
+extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # secretName: grafana-secret-files + # readOnly: true + # subPath: "" + # + # for AWS EKS (cloudwatch) use the following (see also instruction in env: above) + # - name: aws-iam-token + # mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount + # readOnly: true + # projected: + # defaultMode: 420 + # sources: + # - serviceAccountToken: + # audience: sts.amazonaws.com + # expirationSeconds: 86400 + # path: token + # + # for CSI e.g. Azure Key Vault use the following + # - name: secrets-store-inline + # mountPath: /run/secrets + # readOnly: true + # csi: + # driver: secrets-store.csi.k8s.io + # readOnly: true + # volumeAttributes: + # secretProviderClass: "akv-grafana-spc" + # nodePublishSecretRef: # Only required when using service principal mode + # name: grafana-akv-creds # Only required when using service principal mode + +## Additional grafana server volume mounts +# Defines additional volume mounts. +extraVolumeMounts: [] + # - name: extra-volume + # mountPath: /mnt/volume + # readOnly: true + # existingClaim: volume-claim + +## Pass the plugins you want installed as a list. 
+## +plugins: [] + # - digrich-bubblechart-panel + # - grafana-clock-panel + +## Configure grafana datasources +## ref: http://docs.grafana.org/administration/provisioning/#datasources +## +datasources: {} +# datasources.yaml: +# apiVersion: 1 +# datasources: +# - name: Prometheus +# type: prometheus +# url: http://prometheus-prometheus-server +# access: proxy +# isDefault: true +# - name: CloudWatch +# type: cloudwatch +# access: proxy +# uid: cloudwatch +# editable: false +# jsonData: +# authType: credentials +# defaultRegion: us-east-1 + +## Configure notifiers +## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels +## +notifiers: {} +# notifiers.yaml: +# notifiers: +# - name: email-notifier +# type: email +# uid: email1 +# # either: +# org_id: 1 +# # or +# org_name: Main Org. +# is_default: true +# settings: +# addresses: an_email_address@example.com +# delete_notifiers: + +## Configure grafana dashboard providers +## ref: http://docs.grafana.org/administration/provisioning/#dashboards +## +## `path` must be /var/lib/grafana/dashboards/ +## +dashboardProviders: {} +# dashboardproviders.yaml: +# apiVersion: 1 +# providers: +# - name: 'default' +# orgId: 1 +# folder: '' +# type: file +# disableDeletion: false +# editable: true +# options: +# path: /var/lib/grafana/dashboards/default + +## Configure grafana dashboard to import +## NOTE: To use dashboards you must also enable/configure dashboardProviders +## ref: https://grafana.com/dashboards +## +## dashboards per provider, use provider name as key. 
+## +dashboards: {} + # default: + # some-dashboard: + # json: | + # $RAW_JSON + # custom-dashboard: + # file: dashboards/custom-dashboard.json + # prometheus-stats: + # gnetId: 2 + # revision: 2 + # datasource: Prometheus + # local-dashboard: + # url: https://example.com/repository/test.json + # token: '' + # local-dashboard-base64: + # url: https://example.com/repository/test-b64.json + # token: '' + # b64content: true + +## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value. +## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. +## ConfigMap data example: +## +## data: +## example-dashboard.json: | +## RAW_JSON +## +dashboardsConfigMaps: {} +# default: "" + +## Grafana's primary configuration +## NOTE: values in map will be converted to ini format +## ref: http://docs.grafana.org/installation/configuration/ +## +grafana.ini: + paths: + data: /var/lib/grafana/data + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: true + log: + mode: console + grafana_net: + url: https://grafana.net +## grafana Authentication can be enabled with the following values on grafana.ini + # server: + # The full public facing url you use in browser, used for redirects and emails + # root_url: + # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana + # auth.github: + # enabled: false + # allow_sign_up: false + # scopes: user:email,read:org + # auth_url: https://github.com/login/oauth/authorize + # token_url: https://github.com/login/oauth/access_token + # api_url: https://api.github.com/user + # team_ids: + # allowed_organizations: + # client_id: + # client_secret: +## LDAP Authentication can be enabled with the following values on grafana.ini +## NOTE: Grafana will fail to start if the value for ldap.toml is invalid + # auth.ldap: + # enabled: true + # allow_sign_up: true + # 
config_file: /etc/grafana/ldap.toml + +## Grafana's LDAP configuration +## Templated by the template in _helpers.tpl +## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled +## ref: http://docs.grafana.org/installation/configuration/#auth-ldap +## ref: http://docs.grafana.org/installation/ldap/#configuration +ldap: + enabled: false + # `existingSecret` is a reference to an existing secret containing the ldap configuration + # for Grafana in a key `ldap-toml`. + existingSecret: "" + # `config` is the content of `ldap.toml` that will be stored in the created secret + config: "" + # config: |- + # verbose_logging = true + + # [[servers]] + # host = "my-ldap-server" + # port = 636 + # use_ssl = true + # start_tls = false + # ssl_skip_verify = false + # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" + +## Grafana's SMTP configuration +## NOTE: To enable, grafana.ini must be configured with smtp.enabled +## ref: http://docs.grafana.org/installation/configuration/#smtp +smtp: + # `existingSecret` is a reference to an existing secret containing the smtp configuration + # for Grafana. 
+ existingSecret: "" + userKey: "user" + passwordKey: "password" + +## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders +## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards +sidecar: + image: + repository: rancher/mirrored-kiwigrid-k8s-sidecar + tag: 1.10.7 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} +# limits: +# cpu: 100m +# memory: 100Mi +# requests: +# cpu: 50m +# memory: 50Mi + # skipTlsVerify Set to true to skip tls verification for kube api calls + # skipTlsVerify: true + enableUniqueFilenames: false + dashboards: + enabled: false + SCProvider: true + # label that the configmaps with dashboards are marked with + label: grafana_dashboard + # value of label that the configmaps with dashboards are set to + labelValue: null + # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set) + folder: /tmp/dashboards + # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead + defaultFolderName: null + # If specified, the sidecar will search for dashboard config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # If specified, the sidecar will look for annotation with this name to create folder and put graph here. + # You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. 
+ folderAnnotation: null + # provider configuration that lets grafana manage the dashboards + provider: + # name of the provider, should be unique + name: sidecarProvider + # orgid as configured in grafana + orgid: 1 + # folder in which the dashboards should be imported in grafana + folder: '' + # type of the provider + type: file + # disableDelete to activate a import-only behaviour + disableDelete: false + # allow updating provisioned dashboards from the UI + allowUiUpdates: false + # allow Grafana to replicate dashboard structure from filesystem + foldersFromFilesStructure: false + datasources: + enabled: false + # label that the configmaps with datasources are marked with + label: grafana_datasource + # value of label that the configmaps with datasources are set to + labelValue: null + # If specified, the sidecar will search for datasource config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + + ## The name of a secret in the same kubernetes namespace which contain values to be added to the environment + ## This can be useful for database passwords, etc. Value is templated. + envFromSecret: "" + notifiers: + enabled: false + # label that the configmaps with notifiers are marked with + label: grafana_notifier + # If specified, the sidecar will search for notifier config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. 
+ # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + +## Override the deployment namespace +## +namespaceOverride: "" + +## Number of old ReplicaSets to retain +## +revisionHistoryLimit: 10 + +## Add a seperate remote image renderer deployment/service +imageRenderer: + # Enable the image-renderer deployment & service + enabled: false + replicas: 1 + image: + # image-renderer Image repository + repository: rancher/mirrored-grafana-grafana-image-renderer + # image-renderer Image tag + tag: 2.0.1 + # image-renderer Image sha (optional) + sha: "" + # image-renderer ImagePullPolicy + pullPolicy: Always + # extra environment variables + env: + HTTP_HOST: "0.0.0.0" + # RENDERING_ARGS: --disable-gpu,--window-size=1280x758 + # RENDERING_MODE: clustered + # image-renderer deployment serviceAccount + serviceAccountName: "" + # image-renderer deployment securityContext + securityContext: {} + # image-renderer deployment Host Aliases + hostAliases: [] + # image-renderer deployment priority class + priorityClassName: '' + service: + # image-renderer service port name + portName: 'http' + # image-renderer service port used by both service and deployment + port: 8081 + targetPort: 8081 + # In case a sub_path is used this needs to be added to the image renderer callback + grafanaSubPath: "" + # name of the image-renderer port on the pod + podPortName: http + # number of image-renderer replica sets to keep + revisionHistoryLimit: 10 + networkPolicy: + # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods + limitIngress: true + # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods + limitEgress: false + resources: {} +# limits: +# cpu: 100m +# memory: 100Mi +# requests: +# cpu: 50m +# memory: 50Mi diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/.helmignore b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/.helmignore new file 
mode 100755 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/Chart.yaml new file mode 100755 index 000000000..56ff36fc7 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/Chart.yaml @@ -0,0 +1,13 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-pushprox +apiVersion: v1 +appVersion: 0.1.0 +description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx + clients. +name: k3sServer +type: application +version: 0.1.3 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/README.md new file mode 100755 index 000000000..dcecc69da --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/README.md @@ -0,0 +1,54 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. 
+ +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. + +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. 
`http://:/metrics`) | `2379` | + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. 
Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. + +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/_helpers.tpl new file mode 100755 index 000000000..f77b8edf4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/_helpers.tpl @@ -0,0 +1,87 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) 
.Release.Namespace (int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-clients-rbac.yaml new file mode 100755 index 000000000..95346dee6 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,74 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . 
}} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-clients.yaml new file mode 100755 index 000000000..ed78792e5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-clients.yaml @@ -0,0 +1,135 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + 
runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-proxy-rbac.yaml new file mode 100755 index 000000000..a3509c160 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-proxy.yaml new file mode 100755 index 000000000..571e13138 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-proxy.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} + pushprox-exporter: "proxy" +spec: + selector: + matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.proxy.labels" . | nindent 8 }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.proxy.nodeSelector }} +{{ toYaml .Values.proxy.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.proxy.tolerations }} +{{ toYaml .Values.proxy.tolerations | indent 8 }} +{{- end }} + serviceAccountName: {{ template "pushProxy.proxy.name" . 
}} + containers: + - name: pushprox-proxy + image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }} + command: + {{- range .Values.proxy.command }} + - {{ . | quote }} + {{- end }} + {{- if .Values.proxy.resources }} + resources: {{ toYaml .Values.proxy.resources | nindent 10 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + ports: + - name: pp-proxy + port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }} + protocol: TCP + targetPort: {{ .Values.proxy.port }} + selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-servicemonitor.yaml new file mode 100755 index 000000000..2f3d7e54c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,39 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: + - port: metrics + proxyUrl: {{ template "pushProxy.proxyUrl" . }} + {{- if .Values.clients.https.enabled }} + params: + _scheme: [https] + {{- end }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . 
| nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/values.yaml new file mode 100755 index 000000000..e1bcf79a5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/k3sServer/values.yaml @@ -0,0 +1,86 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +# The component that is being monitored (i.e. 
etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher1-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # 
Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher1-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/.helmignore b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/.helmignore new file mode 100755 index 000000000..f0c131944 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/Chart.yaml new file mode 100755 index 000000000..1e90053e9 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/Chart.yaml @@ -0,0 +1,24 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-kube-state-metrics +apiVersion: v1 +appVersion: 1.9.8 +description: Install kube-state-metrics to generate and expose cluster-level metrics +home: https://github.com/kubernetes/kube-state-metrics/ +keywords: +- metric +- monitoring +- prometheus +- kubernetes +maintainers: +- email: tariq.ibrahim@mulesoft.com + name: tariq1890 +- email: manuel@rueg.eu + name: mrueg +name: kube-state-metrics +sources: +- 
https://github.com/kubernetes/kube-state-metrics/ +version: 2.13.1 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/LICENSE b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/LICENSE new file mode 100755 index 000000000..393b7a33b --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright The Helm Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/README.md new file mode 100755 index 000000000..e93a3d252 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/README.md @@ -0,0 +1,66 @@ +# kube-state-metrics Helm Chart + +Installs the [kube-state-metrics agent](https://github.com/kubernetes/kube-state-metrics). + +## Get Repo Info + +```console +helm repo add kube-state-metrics https://kubernetes.github.io/kube-state-metrics +helm repo update +``` + +_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Install Chart + +```console +# Helm 3 +$ helm install [RELEASE_NAME] kube-state-metrics/kube-state-metrics [flags] + +# Helm 2 +$ helm install --name [RELEASE_NAME] kube-state-metrics/kube-state-metrics [flags] +``` + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +# Helm 3 +$ helm uninstall [RELEASE_NAME] + +# Helm 2 +# helm delete --purge [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. 
+ +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +# Helm 3 or 2 +$ helm upgrade [RELEASE_NAME] kube-state-metrics/kube-state-metrics [flags] +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### From stable/kube-state-metrics + +You can upgrade in-place: + +1. [get repo info](#get-repo-info) +1. [upgrade](#upgrading-chart) your existing release name using the new chart repo + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments: + +```console +helm show values kube-state-metrics/kube-state-metrics +``` + +You may also `helm show values` on this chart's [dependencies](#dependencies) for additional options. diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/NOTES.txt b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/NOTES.txt new file mode 100755 index 000000000..5a646e0cc --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/NOTES.txt @@ -0,0 +1,10 @@ +kube-state-metrics is a simple service that listens to the Kubernetes API server and generates metrics about the state of the objects. +The exposed metrics can be found here: +https://github.com/kubernetes/kube-state-metrics/blob/master/docs/README.md#exposed-metrics + +The metrics are exported on the HTTP endpoint /metrics on the listening port. +In your case, {{ template "kube-state-metrics.fullname" . }}.{{ template "kube-state-metrics.namespace" . }}.svc.cluster.local:{{ .Values.service.port }}/metrics + +They are served either as plaintext or protobuf depending on the Accept header. 
+They are designed to be consumed either by Prometheus itself or by a scraper that is compatible with scraping a Prometheus client endpoint. + diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/_helpers.tpl new file mode 100755 index 000000000..4f76b188b --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/_helpers.tpl @@ -0,0 +1,76 @@ +# Rancher +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "kube-state-metrics.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "kube-state-metrics.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "kube-state-metrics.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "kube-state-metrics.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "kube-state-metrics.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/clusterrolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/clusterrolebinding.yaml new file mode 100755 index 000000000..af158c512 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/clusterrolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.rbac.create .Values.rbac.useClusterRole -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: {{ template "kube-state-metrics.fullname" . 
}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- if .Values.rbac.useExistingRole }} + name: {{ .Values.rbac.useExistingRole }} +{{- else }} + name: {{ template "kube-state-metrics.fullname" . }} +{{- end }} +subjects: +- kind: ServiceAccount + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/deployment.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/deployment.yaml new file mode 100755 index 000000000..4ab55291b --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/deployment.yaml @@ -0,0 +1,217 @@ +apiVersion: apps/v1 +{{- if .Values.autosharding.enabled }} +kind: StatefulSet +{{- else }} +kind: Deployment +{{- end }} +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" + app.kubernetes.io/version: "{{ .Chart.AppVersion }}" +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + replicas: {{ .Values.replicas }} +{{- if .Values.autosharding.enabled }} + serviceName: {{ template "kube-state-metrics.fullname" . }} + volumeClaimTemplates: [] +{{- end }} + template: + metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . 
}} + app.kubernetes.io/instance: "{{ .Release.Name }}" +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 8 }} +{{- end }} +{{- if .Values.podAnnotations }} + annotations: +{{ toYaml .Values.podAnnotations | indent 8 }} +{{- end }} + spec: + hostNetwork: {{ .Values.hostNetwork }} + serviceAccountName: {{ template "kube-state-metrics.serviceAccountName" . }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + runAsGroup: {{ .Values.securityContext.runAsGroup }} + runAsUser: {{ .Values.securityContext.runAsUser }} + runAsNonRoot: {{ .Values.securityContext.runAsNonRoot }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + containers: + - name: {{ .Chart.Name }} +{{- if .Values.autosharding.enabled }} + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace +{{- end }} + args: +{{ if .Values.extraArgs }} + {{- range .Values.extraArgs }} + - {{ . 
}} + {{- end }} +{{ end }} +{{ if .Values.collectors.certificatesigningrequests }} + - --collectors=certificatesigningrequests +{{ end }} +{{ if .Values.collectors.configmaps }} + - --collectors=configmaps +{{ end }} +{{ if .Values.collectors.cronjobs }} + - --collectors=cronjobs +{{ end }} +{{ if .Values.collectors.daemonsets }} + - --collectors=daemonsets +{{ end }} +{{ if .Values.collectors.deployments }} + - --collectors=deployments +{{ end }} +{{ if .Values.collectors.endpoints }} + - --collectors=endpoints +{{ end }} +{{ if .Values.collectors.horizontalpodautoscalers }} + - --collectors=horizontalpodautoscalers +{{ end }} +{{ if .Values.collectors.ingresses }} + - --collectors=ingresses +{{ end }} +{{ if .Values.collectors.jobs }} + - --collectors=jobs +{{ end }} +{{ if .Values.collectors.limitranges }} + - --collectors=limitranges +{{ end }} +{{ if .Values.collectors.mutatingwebhookconfigurations }} + - --collectors=mutatingwebhookconfigurations +{{ end }} +{{ if .Values.collectors.namespaces }} + - --collectors=namespaces +{{ end }} +{{ if .Values.collectors.networkpolicies }} + - --collectors=networkpolicies +{{ end }} +{{ if .Values.collectors.nodes }} + - --collectors=nodes +{{ end }} +{{ if .Values.collectors.persistentvolumeclaims }} + - --collectors=persistentvolumeclaims +{{ end }} +{{ if .Values.collectors.persistentvolumes }} + - --collectors=persistentvolumes +{{ end }} +{{ if .Values.collectors.poddisruptionbudgets }} + - --collectors=poddisruptionbudgets +{{ end }} +{{ if .Values.collectors.pods }} + - --collectors=pods +{{ end }} +{{ if .Values.collectors.replicasets }} + - --collectors=replicasets +{{ end }} +{{ if .Values.collectors.replicationcontrollers }} + - --collectors=replicationcontrollers +{{ end }} +{{ if .Values.collectors.resourcequotas }} + - --collectors=resourcequotas +{{ end }} +{{ if .Values.collectors.secrets }} + - --collectors=secrets +{{ end }} +{{ if .Values.collectors.services }} + - --collectors=services +{{ end }} +{{ 
if .Values.collectors.statefulsets }} + - --collectors=statefulsets +{{ end }} +{{ if .Values.collectors.storageclasses }} + - --collectors=storageclasses +{{ end }} +{{ if .Values.collectors.validatingwebhookconfigurations }} + - --collectors=validatingwebhookconfigurations +{{ end }} +{{ if .Values.collectors.verticalpodautoscalers }} + - --collectors=verticalpodautoscalers +{{ end }} +{{ if .Values.collectors.volumeattachments }} + - --collectors=volumeattachments +{{ end }} +{{ if .Values.namespace }} + - --namespace={{ .Values.namespace | join "," }} +{{ end }} +{{ if .Values.autosharding.enabled }} + - --pod=$(POD_NAME) + - --pod-namespace=$(POD_NAMESPACE) +{{ end }} +{{ if .Values.kubeconfig.enabled }} + - --kubeconfig=/opt/k8s/.kube/config +{{ end }} +{{ if .Values.selfMonitor.telemetryHost }} + - --telemetry-host={{ .Values.selfMonitor.telemetryHost }} +{{ end }} + - --telemetry-port=8081 +{{- if .Values.kubeconfig.enabled }} + volumeMounts: + - name: kubeconfig + mountPath: /opt/k8s/.kube/ + readOnly: true +{{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}" + ports: + - containerPort: 8080 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: / + port: 8080 + initialDelaySeconds: 5 + timeoutSeconds: 5 +{{- if .Values.resources }} + resources: +{{ toYaml .Values.resources | indent 10 }} +{{- end }} +{{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} +{{- end }} +{{- if .Values.affinity }} + affinity: +{{ toYaml .Values.affinity | indent 8 }} +{{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.nodeSelector }} +{{ toYaml .Values.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} +{{- if .Values.tolerations }} +{{ toYaml .Values.tolerations | indent 8 }} +{{- end }} +{{- if .Values.kubeconfig.enabled}} + volumes: + - name: kubeconfig + secret: + secretName: {{ template "kube-state-metrics.fullname" . }}-kubeconfig +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/kubeconfig-secret.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/kubeconfig-secret.yaml new file mode 100755 index 000000000..a7800d7ad --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/kubeconfig-secret.yaml @@ -0,0 +1,15 @@ +{{- if .Values.kubeconfig.enabled -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "kube-state-metrics.fullname" . }}-kubeconfig + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" +type: Opaque +data: + config: '{{ .Values.kubeconfig.secret }}' +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/pdb.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/pdb.yaml new file mode 100755 index 000000000..d3ef8104e --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/pdb.yaml @@ -0,0 +1,20 @@ +{{- if .Values.podDisruptionBudget -}} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . 
}} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} +{{ toYaml .Values.podDisruptionBudget | indent 2 }} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/podsecuritypolicy.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/podsecuritypolicy.yaml new file mode 100755 index 000000000..e822ba0e7 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/podsecuritypolicy.yaml @@ -0,0 +1,42 @@ +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +{{- if .Values.podSecurityPolicy.annotations }} + annotations: +{{ toYaml .Values.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + volumes: + - 'secret' +{{- if .Values.podSecurityPolicy.additionalVolumes }} +{{ toYaml .Values.podSecurityPolicy.additionalVolumes | indent 4 }} +{{- end }} + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/psp-clusterrole.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/psp-clusterrole.yaml new file mode 100755 index 000000000..217abc950 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/psp-clusterrole.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.podSecurityPolicy.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: psp-{{ template "kube-state-metrics.fullname" . }} +rules: +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} +- apiGroups: ['policy'] +{{- else }} +- apiGroups: ['extensions'] +{{- end }} + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "kube-state-metrics.fullname" . 
}} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml new file mode 100755 index 000000000..feb97f228 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/psp-clusterrolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.podSecurityPolicy.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: psp-{{ template "kube-state-metrics.fullname" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp-{{ template "kube-state-metrics.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . 
}} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/role.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/role.yaml new file mode 100755 index 000000000..6259d2f61 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/role.yaml @@ -0,0 +1,192 @@ +{{- if and (eq $.Values.rbac.create true) (not .Values.rbac.useExistingRole) -}} +{{- if eq .Values.rbac.useClusterRole false }} +{{- range (split "," $.Values.namespace) }} +{{- end }} +{{- end -}} +--- +apiVersion: rbac.authorization.k8s.io/v1 +{{- if eq .Values.rbac.useClusterRole false }} +kind: Role +{{- else }} +kind: ClusterRole +{{- end }} +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" $ }} + helm.sh/chart: {{ $.Chart.Name }}-{{ $.Chart.Version }} + app.kubernetes.io/managed-by: {{ $.Release.Service }} + app.kubernetes.io/instance: {{ $.Release.Name }} + name: {{ template "kube-state-metrics.fullname" $ }} +{{- if eq .Values.rbac.useClusterRole false }} + namespace: {{ . 
}} +{{- end }} +rules: +{{ if $.Values.collectors.certificatesigningrequests }} +- apiGroups: ["certificates.k8s.io"] + resources: + - certificatesigningrequests + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.configmaps }} +- apiGroups: [""] + resources: + - configmaps + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.cronjobs }} +- apiGroups: ["batch"] + resources: + - cronjobs + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.daemonsets }} +- apiGroups: ["extensions", "apps"] + resources: + - daemonsets + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.deployments }} +- apiGroups: ["extensions", "apps"] + resources: + - deployments + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.endpoints }} +- apiGroups: [""] + resources: + - endpoints + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.horizontalpodautoscalers }} +- apiGroups: ["autoscaling"] + resources: + - horizontalpodautoscalers + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.ingresses }} +- apiGroups: ["extensions", "networking.k8s.io"] + resources: + - ingresses + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.jobs }} +- apiGroups: ["batch"] + resources: + - jobs + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.limitranges }} +- apiGroups: [""] + resources: + - limitranges + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.mutatingwebhookconfigurations }} +- apiGroups: ["admissionregistration.k8s.io"] + resources: + - mutatingwebhookconfigurations + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.namespaces }} +- apiGroups: [""] + resources: + - namespaces + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.networkpolicies }} +- apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.nodes }} +- apiGroups: [""] + resources: + - nodes + 
verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.persistentvolumeclaims }} +- apiGroups: [""] + resources: + - persistentvolumeclaims + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.persistentvolumes }} +- apiGroups: [""] + resources: + - persistentvolumes + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.poddisruptionbudgets }} +- apiGroups: ["policy"] + resources: + - poddisruptionbudgets + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.pods }} +- apiGroups: [""] + resources: + - pods + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.replicasets }} +- apiGroups: ["extensions", "apps"] + resources: + - replicasets + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.replicationcontrollers }} +- apiGroups: [""] + resources: + - replicationcontrollers + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.resourcequotas }} +- apiGroups: [""] + resources: + - resourcequotas + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.secrets }} +- apiGroups: [""] + resources: + - secrets + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.services }} +- apiGroups: [""] + resources: + - services + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.statefulsets }} +- apiGroups: ["apps"] + resources: + - statefulsets + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.storageclasses }} +- apiGroups: ["storage.k8s.io"] + resources: + - storageclasses + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.validatingwebhookconfigurations }} +- apiGroups: ["admissionregistration.k8s.io"] + resources: + - validatingwebhookconfigurations + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.volumeattachments }} +- apiGroups: ["storage.k8s.io"] + resources: + - volumeattachments + verbs: ["list", "watch"] +{{ end -}} +{{ if $.Values.collectors.verticalpodautoscalers }} +- apiGroups: ["autoscaling.k8s.io"] 
+ resources: + - verticalpodautoscalers + verbs: ["list", "watch"] +{{ end -}} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/rolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/rolebinding.yaml new file mode 100755 index 000000000..732174a33 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/rolebinding.yaml @@ -0,0 +1,27 @@ +{{- if and (eq .Values.rbac.create true) (eq .Values.rbac.useClusterRole false) -}} +{{- range (split "," $.Values.namespace) }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" $ }} + helm.sh/chart: {{ $.Chart.Name }}-{{ $.Chart.Version }} + app.kubernetes.io/managed-by: {{ $.Release.Service }} + app.kubernetes.io/instance: {{ $.Release.Name }} + name: {{ template "kube-state-metrics.fullname" $ }} + namespace: {{ . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role +{{- if (not $.Values.rbac.useExistingRole) }} + name: {{ template "kube-state-metrics.fullname" $ }} +{{- else }} + name: {{ $.Values.rbac.useExistingRole }} +{{- end }} +subjects: +- kind: ServiceAccount + name: {{ template "kube-state-metrics.fullname" $ }} + namespace: {{ template "kube-state-metrics.namespace" $ }} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/service.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/service.yaml new file mode 100755 index 000000000..4f8e4a497 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/service.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-state-metrics.fullname" . 
}} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} + annotations: + {{- if .Values.prometheusScrape }} + prometheus.io/scrape: '{{ .Values.prometheusScrape }}' + {{- end }} + {{- if .Values.service.annotations }} + {{- toYaml .Values.service.annotations | nindent 4 }} + {{- end }} +spec: + type: "{{ .Values.service.type }}" + ports: + - name: "http" + protocol: TCP + port: {{ .Values.service.port }} + {{- if .Values.service.nodePort }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + targetPort: 8080 + {{ if .Values.selfMonitor.enabled }} + - name: "metrics" + protocol: TCP + port: {{ .Values.selfMonitor.telemetryPort | default 8081 }} + targetPort: 8081 + {{ end }} +{{- if .Values.service.loadBalancerIP }} + loadBalancerIP: "{{ .Values.service.loadBalancerIP }}" +{{- end }} + selector: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/serviceaccount.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/serviceaccount.yaml new file mode 100755 index 000000000..2e8a1ee38 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/serviceaccount.yaml @@ -0,0 +1,18 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . 
}} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- if .Values.serviceAccount.annotations }} + annotations: +{{ toYaml .Values.serviceAccount.annotations | indent 4 }} +{{- end }} +imagePullSecrets: +{{ toYaml .Values.serviceAccount.imagePullSecrets | indent 2 }} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/servicemonitor.yaml new file mode 100755 index 000000000..7d1cd7aa1 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/servicemonitor.yaml @@ -0,0 +1,34 @@ +{{- if .Values.prometheus.monitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" + app.kubernetes.io/instance: "{{ .Release.Name }}" + app.kubernetes.io/managed-by: "{{ .Release.Service }}" + {{- if .Values.prometheus.monitor.additionalLabels }} +{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }} + {{- end }} +{{- if .Values.customLabels }} +{{ toYaml .Values.customLabels | indent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + endpoints: + - port: http + {{- if .Values.prometheus.monitor.honorLabels }} + honorLabels: true + {{- end }} + {{ if .Values.selfMonitor.enabled }} + - port: metrics + {{- if .Values.prometheus.monitor.honorLabels }} + honorLabels: true + {{- end }} + {{ end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/stsdiscovery-role.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/stsdiscovery-role.yaml new file mode 100755 index 000000000..9770b0498 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/stsdiscovery-role.yaml @@ -0,0 +1,29 @@ +{{- if and .Values.autosharding.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get +- apiGroups: + - apps + resourceNames: + - {{ template "kube-state-metrics.fullname" . 
}} + resources: + - statefulsets + verbs: + - get + - list + - watch +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml new file mode 100755 index 000000000..6a2e5bfe7 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/templates/stsdiscovery-rolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.autosharding.enabled .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} + labels: + app.kubernetes.io/name: {{ template "kube-state-metrics.name" . }} + helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: stsdiscovery-{{ template "kube-state-metrics.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "kube-state-metrics.fullname" . }} + namespace: {{ template "kube-state-metrics.namespace" . }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/values.yaml new file mode 100755 index 000000000..f64645690 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kube-state-metrics/values.yaml @@ -0,0 +1,184 @@ +global: + cattle: + systemDefaultRegistry: "" + +# Default values for kube-state-metrics. 
+prometheusScrape: true +image: + repository: rancher/mirrored-kube-state-metrics-kube-state-metrics + tag: v1.9.8 + pullPolicy: IfNotPresent + +imagePullSecrets: [] +# - name: "image-pull-secret" + +# If set to true, this will deploy kube-state-metrics as a StatefulSet and the data +# will be automatically sharded across <.Values.replicas> pods using the built-in +# autodiscovery feature: https://github.com/kubernetes/kube-state-metrics#automated-sharding +# This is an experimental feature and there are no stability guarantees. +autosharding: + enabled: false + +replicas: 1 + +# List of additional cli arguments to configure kube-state-metrics +# for example: --enable-gzip-encoding, --log-file, etc. +# all the possible args can be found here: https://github.com/kubernetes/kube-state-metrics/blob/master/docs/cli-arguments.md +extraArgs: [] + +service: + port: 8080 + # Default to clusterIP for backward compatibility + type: ClusterIP + nodePort: 0 + loadBalancerIP: "" + annotations: {} + +customLabels: {} + +hostNetwork: false + +rbac: + # If true, create & use RBAC resources + create: true + + # Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to it, rolename set here. + # useExistingRole: your-existing-role + + # If set to false - Run without Cluteradmin privs needed - ONLY works if namespace is also set (if useExistingRole is set this name is used as ClusterRole or Role to bind to) + useClusterRole: true + +serviceAccount: + # Specifies whether a ServiceAccount should be created, require rbac true + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + # Reference to one or more secrets to be used when pulling images + # ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + imagePullSecrets: [] + # ServiceAccount annotations. 
+ # Use case: AWS EKS IAM roles for service accounts + # ref: https://docs.aws.amazon.com/eks/latest/userguide/specify-service-account-role.html + annotations: {} + +prometheus: + monitor: + enabled: false + additionalLabels: {} + namespace: "" + honorLabels: false + +## Specify if a Pod Security Policy for kube-state-metrics must be created +## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + enabled: false + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + additionalVolumes: [] + +securityContext: + enabled: true + runAsNonRoot: true + runAsGroup: 65534 + runAsUser: 65534 + fsGroup: 65534 + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +nodeSelector: {} + +## Affinity settings for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +affinity: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +tolerations: [] + +# Annotations to be added to the pod +podAnnotations: {} + +## Assign a PriorityClassName to pods if set +# priorityClassName: "" + +# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} + +# Available collectors for kube-state-metrics. By default all available +# collectors are enabled. 
+collectors: + certificatesigningrequests: true + configmaps: true + cronjobs: true + daemonsets: true + deployments: true + endpoints: true + horizontalpodautoscalers: true + ingresses: true + jobs: true + limitranges: true + mutatingwebhookconfigurations: true + namespaces: true + networkpolicies: true + nodes: true + persistentvolumeclaims: true + persistentvolumes: true + poddisruptionbudgets: true + pods: true + replicasets: true + replicationcontrollers: true + resourcequotas: true + secrets: true + services: true + statefulsets: true + storageclasses: true + validatingwebhookconfigurations: true + verticalpodautoscalers: false + volumeattachments: true + +# Enabling kubeconfig will pass the --kubeconfig argument to the container +kubeconfig: + enabled: false + # base64 encoded kube-config file + secret: + +# Namespace to be enabled for collecting resources. By default all namespaces are collected. +# namespace: "" + +## Override the deployment namespace +## +namespaceOverride: "" + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 64Mi + # requests: + # cpu: 10m + # memory: 32Mi + +## Provide a k8s version to define apiGroups for podSecurityPolicy Cluster Role. 
+## For example: kubeTargetVersionOverride: 1.14.9 +## +kubeTargetVersionOverride: "" + +# Enable self metrics configuration for service and Service Monitor +# Default values for telemetry configuration can be overriden +selfMonitor: + enabled: false + # telemetryHost: 0.0.0.0 + # telemetryPort: 8081 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/.helmignore b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/.helmignore new file mode 100755 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/Chart.yaml new file mode 100755 index 000000000..a82ef1d32 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/Chart.yaml @@ -0,0 +1,13 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-pushprox +apiVersion: v1 +appVersion: 0.1.0 +description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx + clients. 
+name: kubeAdmControllerManager +type: application +version: 0.1.3 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/README.md new file mode 100755 index 000000000..dcecc69da --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/README.md @@ -0,0 +1,54 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. 
+ +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. + +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. 
The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. 
+ +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/_helpers.tpl new file mode 100755 index 000000000..f77b8edf4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/_helpers.tpl @@ -0,0 +1,87 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf 
"http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) .Release.Namespace (int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-clients-rbac.yaml new file mode 100755 index 000000000..95346dee6 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,74 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . }} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-clients.yaml new file mode 100755 index 000000000..ed78792e5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-clients.yaml @@ -0,0 +1,135 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + 
runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-proxy-rbac.yaml new file mode 100755 index 000000000..a3509c160 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-proxy.yaml new file mode 100755 index 000000000..571e13138 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-proxy.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} + pushprox-exporter: "proxy" +spec: + selector: + matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.proxy.labels" . | nindent 8 }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.proxy.nodeSelector }} +{{ toYaml .Values.proxy.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.proxy.tolerations }} +{{ toYaml .Values.proxy.tolerations | indent 8 }} +{{- end }} + serviceAccountName: {{ template "pushProxy.proxy.name" . 
}} + containers: + - name: pushprox-proxy + image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }} + command: + {{- range .Values.proxy.command }} + - {{ . | quote }} + {{- end }} + {{- if .Values.proxy.resources }} + resources: {{ toYaml .Values.proxy.resources | nindent 10 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + ports: + - name: pp-proxy + port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }} + protocol: TCP + targetPort: {{ .Values.proxy.port }} + selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-servicemonitor.yaml new file mode 100755 index 000000000..2f3d7e54c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,39 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: + - port: metrics + proxyUrl: {{ template "pushProxy.proxyUrl" . }} + {{- if .Values.clients.https.enabled }} + params: + _scheme: [https] + {{- end }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . 
}} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/values.yaml new file mode 100755 index 000000000..e1bcf79a5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmControllerManager/values.yaml @@ -0,0 +1,86 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +# The component that is being monitored (i.e. 
etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher1-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # 
Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher1-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/.helmignore b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/.helmignore new file mode 100755 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/Chart.yaml new file mode 100755 index 000000000..bfb047ae6 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/Chart.yaml @@ -0,0 +1,13 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-pushprox +apiVersion: v1 +appVersion: 0.1.0 +description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx + clients. 
+name: kubeAdmEtcd +type: application +version: 0.1.3 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/README.md new file mode 100755 index 000000000..dcecc69da --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/README.md @@ -0,0 +1,54 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. 
+ +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. 
The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. 
+ +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/_helpers.tpl new file mode 100755 index 000000000..f77b8edf4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/_helpers.tpl @@ -0,0 +1,87 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include 
"pushProxy.proxy.name" .) .Release.Namespace (int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-clients-rbac.yaml new file mode 100755 index 000000000..95346dee6 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,74 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . 
}} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-clients.yaml new file mode 100755 index 000000000..ed78792e5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-clients.yaml @@ -0,0 +1,135 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + 
runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-proxy-rbac.yaml new file mode 100755 index 000000000..a3509c160 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-proxy.yaml new file mode 100755 index 000000000..571e13138 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-proxy.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} + pushprox-exporter: "proxy" +spec: + selector: + matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.proxy.labels" . | nindent 8 }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.proxy.nodeSelector }} +{{ toYaml .Values.proxy.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.proxy.tolerations }} +{{ toYaml .Values.proxy.tolerations | indent 8 }} +{{- end }} + serviceAccountName: {{ template "pushProxy.proxy.name" . 
}} + containers: + - name: pushprox-proxy + image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }} + command: + {{- range .Values.proxy.command }} + - {{ . | quote }} + {{- end }} + {{- if .Values.proxy.resources }} + resources: {{ toYaml .Values.proxy.resources | nindent 10 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + ports: + - name: pp-proxy + port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }} + protocol: TCP + targetPort: {{ .Values.proxy.port }} + selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-servicemonitor.yaml new file mode 100755 index 000000000..2f3d7e54c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,39 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: + - port: metrics + proxyUrl: {{ template "pushProxy.proxyUrl" . }} + {{- if .Values.clients.https.enabled }} + params: + _scheme: [https] + {{- end }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . 
}} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/values.yaml new file mode 100755 index 000000000..e1bcf79a5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmEtcd/values.yaml @@ -0,0 +1,86 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +# The component that is being monitored (i.e. 
etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher1-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # 
Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher1-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/.helmignore b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/.helmignore new file mode 100755 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/Chart.yaml new file mode 100755 index 000000000..ffe9ae70c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/Chart.yaml @@ -0,0 +1,13 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-pushprox +apiVersion: v1 +appVersion: 0.1.0 +description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx + clients. 
+name: kubeAdmProxy +type: application +version: 0.1.3 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/README.md new file mode 100755 index 000000000..dcecc69da --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/README.md @@ -0,0 +1,54 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. 
+ +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. 
The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. 
+ +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/_helpers.tpl new file mode 100755 index 000000000..f77b8edf4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/_helpers.tpl @@ -0,0 +1,87 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include 
"pushProxy.proxy.name" .) .Release.Namespace (int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-clients-rbac.yaml new file mode 100755 index 000000000..95346dee6 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,74 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . 
}} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-clients.yaml new file mode 100755 index 000000000..ed78792e5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-clients.yaml @@ -0,0 +1,135 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + 
runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-proxy-rbac.yaml new file mode 100755 index 000000000..a3509c160 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-proxy.yaml new file mode 100755 index 000000000..571e13138 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-proxy.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} + pushprox-exporter: "proxy" +spec: + selector: + matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.proxy.labels" . | nindent 8 }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.proxy.nodeSelector }} +{{ toYaml .Values.proxy.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.proxy.tolerations }} +{{ toYaml .Values.proxy.tolerations | indent 8 }} +{{- end }} + serviceAccountName: {{ template "pushProxy.proxy.name" . 
}} + containers: + - name: pushprox-proxy + image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }} + command: + {{- range .Values.proxy.command }} + - {{ . | quote }} + {{- end }} + {{- if .Values.proxy.resources }} + resources: {{ toYaml .Values.proxy.resources | nindent 10 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + ports: + - name: pp-proxy + port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }} + protocol: TCP + targetPort: {{ .Values.proxy.port }} + selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-servicemonitor.yaml new file mode 100755 index 000000000..2f3d7e54c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,39 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: + - port: metrics + proxyUrl: {{ template "pushProxy.proxyUrl" . }} + {{- if .Values.clients.https.enabled }} + params: + _scheme: [https] + {{- end }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . 
}} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/values.yaml new file mode 100755 index 000000000..e1bcf79a5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmProxy/values.yaml @@ -0,0 +1,86 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +# The component that is being monitored (i.e. 
etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher1-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # 
Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher1-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/.helmignore b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/.helmignore new file mode 100755 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/Chart.yaml new file mode 100755 index 000000000..794197de1 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/Chart.yaml @@ -0,0 +1,13 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-pushprox +apiVersion: v1 +appVersion: 0.1.0 +description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx + clients. 
+name: kubeAdmScheduler +type: application +version: 0.1.3 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/README.md new file mode 100755 index 000000000..dcecc69da --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/README.md @@ -0,0 +1,54 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. 
+ +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. 
The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. 
+ +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/_helpers.tpl new file mode 100755 index 000000000..f77b8edf4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/_helpers.tpl @@ -0,0 +1,87 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" 
(include "pushProxy.proxy.name" .) .Release.Namespace (int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-clients-rbac.yaml new file mode 100755 index 000000000..95346dee6 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,74 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . 
}} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-clients.yaml new file mode 100755 index 000000000..ed78792e5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-clients.yaml @@ -0,0 +1,135 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + 
runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-proxy-rbac.yaml new file mode 100755 index 000000000..a3509c160 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-proxy.yaml new file mode 100755 index 000000000..571e13138 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-proxy.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} + pushprox-exporter: "proxy" +spec: + selector: + matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.proxy.labels" . | nindent 8 }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.proxy.nodeSelector }} +{{ toYaml .Values.proxy.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.proxy.tolerations }} +{{ toYaml .Values.proxy.tolerations | indent 8 }} +{{- end }} + serviceAccountName: {{ template "pushProxy.proxy.name" . 
}} + containers: + - name: pushprox-proxy + image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }} + command: + {{- range .Values.proxy.command }} + - {{ . | quote }} + {{- end }} + {{- if .Values.proxy.resources }} + resources: {{ toYaml .Values.proxy.resources | nindent 10 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + ports: + - name: pp-proxy + port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }} + protocol: TCP + targetPort: {{ .Values.proxy.port }} + selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-servicemonitor.yaml new file mode 100755 index 000000000..2f3d7e54c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,39 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: + - port: metrics + proxyUrl: {{ template "pushProxy.proxyUrl" . }} + {{- if .Values.clients.https.enabled }} + params: + _scheme: [https] + {{- end }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . 
}} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/values.yaml new file mode 100755 index 000000000..e1bcf79a5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/kubeAdmScheduler/values.yaml @@ -0,0 +1,86 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +# The component that is being monitored (i.e. 
etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher1-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # 
Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher1-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/.helmignore b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/.helmignore new file mode 100755 index 000000000..f0c131944 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/Chart.yaml new file mode 100755 index 000000000..194f0877b --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/Chart.yaml @@ -0,0 +1,26 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-prometheus-adapter +apiVersion: v1 +appVersion: v0.8.3 +description: A Helm chart for k8s prometheus adapter +home: https://github.com/DirectXMan12/k8s-prometheus-adapter +keywords: +- hpa +- metrics +- prometheus +- adapter +maintainers: +- email: mattias.gees@jetstack.io + name: mattiasgees +- name: steven-sheehy +- email: hfernandez@mesosphere.com + name: hectorj2f +name: prometheus-adapter +sources: +- 
https://github.com/kubernetes/charts +- https://github.com/DirectXMan12/k8s-prometheus-adapter +version: 2.12.1 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/README.md new file mode 100755 index 000000000..1fe1fad66 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/README.md @@ -0,0 +1,147 @@ +# Prometheus Adapter + +Installs the [Prometheus Adapter](https://github.com/DirectXMan12/k8s-prometheus-adapter) for the Custom Metrics API. Custom metrics are used in Kubernetes by [Horizontal Pod Autoscalers](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) to scale workloads based upon your own metric pulled from an external metrics provider like Prometheus. This chart complements the [metrics-server](https://github.com/helm/charts/tree/master/stable/metrics-server) chart that provides resource only metrics. + +## Prerequisites + +Kubernetes 1.14+ + +## Get Repo Info + +```console +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm repo update +``` + +_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Install Chart + +```console +# Helm 3 +$ helm install [RELEASE_NAME] prometheus-community/prometheus-adapter + +# Helm 2 +$ helm install --name [RELEASE_NAME] prometheus-community/prometheus-adapter +``` + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +# Helm 3 +$ helm uninstall [RELEASE_NAME] + +# Helm 2 +# helm delete --purge [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. 
+ +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +# Helm 3 or 2 +$ helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +# Helm 2 +$ helm inspect values prometheus-community/prometheus-adapter + +# Helm 3 +$ helm show values prometheus-community/prometheus-adapter +``` + +### Prometheus Service Endpoint + +To use the chart, ensure the `prometheus.url` and `prometheus.port` are configured with the correct Prometheus service endpoint. If Prometheus is exposed under HTTPS the host's CA Bundle must be exposed to the container using `extraVolumes` and `extraVolumeMounts`. + +### Adapter Rules + +Additionally, the chart comes with a set of default rules out of the box but they may pull in too many metrics or not map them correctly for your needs. Therefore, it is recommended to populate `rules.custom` with a list of rules (see the [config document](https://github.com/DirectXMan12/k8s-prometheus-adapter/blob/master/docs/config.md) for the proper format). + +### Horizontal Pod Autoscaler Metrics + +Finally, to configure your Horizontal Pod Autoscaler to use the custom metric, see the custom metrics section of the [HPA walkthrough](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/#autoscaling-on-multiple-metrics-and-custom-metrics). 
+ +The Prometheus Adapter can serve three different [metrics APIs](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-metrics-apis): + +### Custom Metrics + +Enabling this option will cause custom metrics to be served at `/apis/custom.metrics.k8s.io/v1beta1`. Enabled by default when `rules.default` is true, but can be customized by populating `rules.custom`: + +```yaml +rules: + custom: + - seriesQuery: '{__name__=~"^some_metric_count$"}' + resources: + template: <<.Resource>> + name: + matches: "" + as: "my_custom_metric" + metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>) +``` + +### External Metrics + +Enabling this option will cause external metrics to be served at `/apis/external.metrics.k8s.io/v1beta1`. Can be enabled by populating `rules.external`: + +```yaml +rules: + external: + - seriesQuery: '{__name__=~"^some_metric_count$"}' + resources: + template: <<.Resource>> + name: + matches: "" + as: "my_external_metric" + metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>) +``` + +### Resource Metrics + +Enabling this option will cause resource metrics to be served at `/apis/metrics.k8s.io/v1beta1`. Resource metrics will allow pod CPU and Memory metrics to be used in [Horizontal Pod Autoscalers](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) as well as the `kubectl top` command. 
Can be enabled by populating `rules.resource`: + +```yaml +rules: + resource: + cpu: + containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>}[3m])) by (<<.GroupBy>>) + nodeQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[3m])) by (<<.GroupBy>>) + resources: + overrides: + instance: + resource: node + namespace: + resource: namespace + pod: + resource: pod + containerLabel: container + memory: + containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>) + nodeQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>) + resources: + overrides: + instance: + resource: node + namespace: + resource: namespace + pod: + resource: pod + containerLabel: container + window: 3m +``` + +**NOTE:** Setting a value for `rules.resource` will also deploy the resource metrics API service, providing the same functionality as [metrics-server](https://github.com/helm/charts/tree/master/stable/metrics-server). As such it is not possible to deploy them both in the same cluster. diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/NOTES.txt b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/NOTES.txt new file mode 100755 index 000000000..b7b9b9932 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/NOTES.txt @@ -0,0 +1,9 @@ +{{ template "k8s-prometheus-adapter.fullname" . }} has been deployed. 
+In a few minutes you should be able to list metrics using the following command(s): +{{ if .Values.rules.resource }} + kubectl get --raw /apis/metrics.k8s.io/v1beta1 +{{- end }} + kubectl get --raw /apis/custom.metrics.k8s.io/v1beta1 +{{ if .Values.rules.external }} + kubectl get --raw /apis/external.metrics.k8s.io/v1beta1 +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/_helpers.tpl new file mode 100755 index 000000000..35c38b621 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/_helpers.tpl @@ -0,0 +1,72 @@ +# Rancher +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "k8s-prometheus-adapter.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "k8s-prometheus-adapter.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "k8s-prometheus-adapter.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "k8s-prometheus-adapter.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "k8s-prometheus-adapter.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/certmanager.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/certmanager.yaml new file mode 100755 index 000000000..7999e3c21 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/certmanager.yaml @@ -0,0 +1,48 @@ +{{- if .Values.certManager.enabled -}} +--- +# Create a selfsigned Issuer, in order to create a root CA certificate for +# signing webhook serving certificates +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ template "k8s-prometheus-adapter.fullname" . }}-self-signed-issuer +spec: + selfSigned: {} +--- +# Generate a CA Certificate used to sign certificates for the webhook +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ template "k8s-prometheus-adapter.fullname" . 
}}-root-cert +spec: + secretName: {{ template "k8s-prometheus-adapter.fullname" . }}-root-cert + duration: {{ .Values.certManager.caCertDuration }} + issuerRef: + name: {{ template "k8s-prometheus-adapter.fullname" . }}-self-signed-issuer + commonName: "ca.webhook.prometheus-adapter" + isCA: true +--- +# Create an Issuer that uses the above generated CA certificate to issue certs +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ template "k8s-prometheus-adapter.fullname" . }}-root-issuer +spec: + ca: + secretName: {{ template "k8s-prometheus-adapter.fullname" . }}-root-cert +--- +# Finally, generate a serving certificate for the apiservices to use +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ template "k8s-prometheus-adapter.fullname" . }}-cert +spec: + secretName: {{ template "k8s-prometheus-adapter.fullname" . }} + duration: {{ .Values.certManager.certDuration }} + issuerRef: + name: {{ template "k8s-prometheus-adapter.fullname" . }}-root-issuer + dnsNames: + - {{ template "k8s-prometheus-adapter.fullname" . }} + - {{ template "k8s-prometheus-adapter.fullname" . }}.{{ .Release.Namespace }} + - {{ template "k8s-prometheus-adapter.fullname" . }}.{{ .Release.Namespace }}.svc +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/cluster-role-binding-auth-delegator.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/cluster-role-binding-auth-delegator.yaml new file mode 100755 index 000000000..2bc9eb740 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/cluster-role-binding-auth-delegator.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "k8s-prometheus-adapter.name" . 
}} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: {{ template "k8s-prometheus-adapter.name" . }}-system-auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: +- kind: ServiceAccount + name: {{ template "k8s-prometheus-adapter.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/cluster-role-binding-resource-reader.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/cluster-role-binding-resource-reader.yaml new file mode 100755 index 000000000..ec7e5e476 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/cluster-role-binding-resource-reader.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: {{ template "k8s-prometheus-adapter.name" . }}-resource-reader +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "k8s-prometheus-adapter.name" . }}-resource-reader +subjects: +- kind: ServiceAccount + name: {{ template "k8s-prometheus-adapter.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/cluster-role-resource-reader.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/cluster-role-resource-reader.yaml new file mode 100755 index 000000000..319460a33 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/cluster-role-resource-reader.yaml @@ -0,0 +1,23 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: {{ template "k8s-prometheus-adapter.name" . }}-resource-reader +rules: +- apiGroups: + - "" + resources: + - namespaces + - pods + - services + - configmaps + verbs: + - get + - list + - watch +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/configmap.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/configmap.yaml new file mode 100755 index 000000000..fbc155dc8 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/configmap.yaml @@ -0,0 +1,96 @@ +{{- if not .Values.rules.existing -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "k8s-prometheus-adapter.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +data: + config.yaml: | +{{- if or .Values.rules.default .Values.rules.custom }} + rules: +{{- if .Values.rules.default }} + - seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}' + seriesFilters: [] + resources: + overrides: + namespace: + resource: namespace + pod: + resource: pod + name: + matches: ^container_(.*)_seconds_total$ + as: "" + metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[5m])) + by (<<.GroupBy>>) + - seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}' + seriesFilters: + - isNot: ^container_.*_seconds_total$ + resources: + overrides: + namespace: + resource: namespace + pod: + resource: pod + name: + matches: ^container_(.*)_total$ + as: "" + metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[5m])) + by (<<.GroupBy>>) + - seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}' + seriesFilters: + - isNot: ^container_.*_total$ + resources: + overrides: + namespace: + resource: namespace + pod: + resource: pod + name: + matches: ^container_(.*)$ + as: "" + metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>,container!="POD"}) by (<<.GroupBy>>) + - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' + seriesFilters: + - isNot: .*_total$ + resources: + template: <<.Resource>> + name: + matches: "" + as: "" + metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>) + - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' + seriesFilters: + - isNot: .*_seconds_total + resources: + template: <<.Resource>> + name: + matches: ^(.*)_total$ + as: "" + metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[5m])) by (<<.GroupBy>>) + - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' + seriesFilters: [] + resources: + template: <<.Resource>> + name: + matches: ^(.*)_seconds_total$ + as: "" + metricsQuery: 
sum(rate(<<.Series>>{<<.LabelMatchers>>}[5m])) by (<<.GroupBy>>) +{{- end -}} +{{- if .Values.rules.custom }} +{{ toYaml .Values.rules.custom | indent 4 }} +{{- end -}} +{{- end -}} +{{- if .Values.rules.external }} + externalRules: +{{ toYaml .Values.rules.external | indent 4 }} +{{- end -}} +{{- if .Values.rules.resource }} + resourceRules: +{{ toYaml .Values.rules.resource | indent 6 }} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/custom-metrics-apiservice.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/custom-metrics-apiservice.yaml new file mode 100755 index 000000000..9bc1cbda1 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/custom-metrics-apiservice.yaml @@ -0,0 +1,32 @@ +{{- if or .Values.rules.default .Values.rules.custom }} +{{- if .Capabilities.APIVersions.Has "apiregistration.k8s.io/v1" }} +apiVersion: apiregistration.k8s.io/v1 +{{- else }} +apiVersion: apiregistration.k8s.io/v1beta1 +{{- end }} +kind: APIService +metadata: +{{- if .Values.certManager.enabled }} + annotations: + certmanager.k8s.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "k8s-prometheus-adapter.fullname" .) | quote }} + cert-manager.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "k8s-prometheus-adapter.fullname" .) | quote }} +{{- end }} + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: v1beta1.custom.metrics.k8s.io +spec: + service: + name: {{ template "k8s-prometheus-adapter.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + {{- if .Values.tls.enable }} + caBundle: {{ b64enc .Values.tls.ca }} + {{- end }} + group: custom.metrics.k8s.io + version: v1beta1 + insecureSkipTLSVerify: {{ if or .Values.tls.enable .Values.certManager.enabled }}false{{ else }}true{{ end }} + groupPriorityMinimum: 100 + versionPriority: 100 +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/custom-metrics-cluster-role-binding-hpa.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/custom-metrics-cluster-role-binding-hpa.yaml new file mode 100755 index 000000000..93ade6f8f --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/custom-metrics-cluster-role-binding-hpa.yaml @@ -0,0 +1,23 @@ +{{- /* +This if must be aligned with custom-metrics-cluster-role.yaml +as otherwise this binding will point to not existing role. +*/ -}} +{{- if and .Values.rbac.create (or .Values.rules.default .Values.rules.custom) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: {{ template "k8s-prometheus-adapter.name" . }}-hpa-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "k8s-prometheus-adapter.name" . }}-server-resources +subjects: +- kind: ServiceAccount + name: {{ template "k8s-prometheus-adapter.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/custom-metrics-cluster-role.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/custom-metrics-cluster-role.yaml new file mode 100755 index 000000000..33daf7113 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/custom-metrics-cluster-role.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.rbac.create (or .Values.rules.default .Values.rules.custom) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: {{ template "k8s-prometheus-adapter.name" . }}-server-resources +rules: +- apiGroups: + - custom.metrics.k8s.io + resources: ["*"] + verbs: ["*"] +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/deployment.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/deployment.yaml new file mode 100755 index 000000000..43fb65dc8 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/deployment.yaml @@ -0,0 +1,135 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: {{ template "k8s-prometheus-adapter.fullname" . }} + namespace: {{ .Release.Namespace }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + app: {{ template "k8s-prometheus-adapter.name" . 
}} + release: {{ .Release.Name }} + template: + metadata: + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- with .Values.podLabels }} + {{- toYaml . | trim | nindent 8 }} + {{- end }} + name: {{ template "k8s-prometheus-adapter.name" . }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.podAnnotations }} + {{- toYaml . | trim | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "k8s-prometheus-adapter.serviceAccountName" . }} + {{- if .Values.hostNetwork.enabled }} + hostNetwork: true + {{- end }} + {{- if .Values.dnsPolicy }} + dnsPolicy: {{ .Values.dnsPolicy }} + {{- end}} + containers: + - name: {{ .Chart.Name }} + image: "{{ template "system_default_registry" . }}{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - /adapter + - --secure-port={{ .Values.listenPort }} + {{- if or .Values.tls.enable .Values.certManager.enabled }} + - --tls-cert-file=/var/run/serving-cert/tls.crt + - --tls-private-key-file=/var/run/serving-cert/tls.key + {{- end }} + - --cert-dir=/tmp/cert + - --logtostderr=true + - --prometheus-url={{ tpl .Values.prometheus.url . 
}}{{ if .Values.prometheus.port }}:{{ .Values.prometheus.port }}{{end}}{{ .Values.prometheus.path }} + - --metrics-relist-interval={{ .Values.metricsRelistInterval }} + - --v={{ .Values.logLevel }} + - --config=/etc/adapter/config.yaml + {{- if .Values.extraArguments }} + {{- toYaml .Values.extraArguments | trim | nindent 8 }} + {{- end }} + ports: + - containerPort: {{ .Values.listenPort }} + name: https + livenessProbe: + httpGet: + path: /healthz + port: https + scheme: HTTPS + initialDelaySeconds: 30 + readinessProbe: + httpGet: + path: /healthz + port: https + scheme: HTTPS + initialDelaySeconds: 30 + {{- if .Values.resources }} + resources: + {{- toYaml .Values.resources | nindent 10 }} + {{- end }} + {{- with .Values.dnsConfig }} + dnsConfig: + {{ toYaml . | indent 8 }} + {{- end }} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: ["all"] + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 10001 + volumeMounts: + {{- if .Values.extraVolumeMounts }} + {{ toYaml .Values.extraVolumeMounts | trim | nindent 8 }} + {{ end }} + - mountPath: /etc/adapter/ + name: config + readOnly: true + - mountPath: /tmp + name: tmp + {{- if or .Values.tls.enable .Values.certManager.enabled }} + - mountPath: /var/run/serving-cert + name: volume-serving-cert + readOnly: true + {{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.nodeSelector }} +{{- toYaml .Values.nodeSelector | nindent 8 }} +{{- end }} + affinity: + {{- toYaml .Values.affinity | nindent 8 }} + priorityClassName: {{ .Values.priorityClassName }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.tolerations }} +{{- toYaml .Values.tolerations | nindent 8 }} +{{- end }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} + volumes: + {{- if .Values.extraVolumes }} + {{ toYaml .Values.extraVolumes | trim | nindent 6 }} + {{ end }} + - name: config + configMap: + name: {{ .Values.rules.existing | default (include "k8s-prometheus-adapter.fullname" . ) }} + - name: tmp + emptyDir: {} + {{- if or .Values.tls.enable .Values.certManager.enabled }} + - name: volume-serving-cert + secret: + secretName: {{ template "k8s-prometheus-adapter.fullname" . }} + {{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/external-metrics-apiservice.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/external-metrics-apiservice.yaml new file mode 100755 index 000000000..035f24694 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/external-metrics-apiservice.yaml @@ -0,0 +1,32 @@ +{{- if .Values.rules.external }} +{{- if .Capabilities.APIVersions.Has "apiregistration.k8s.io/v1" }} +apiVersion: apiregistration.k8s.io/v1 +{{- else }} +apiVersion: apiregistration.k8s.io/v1beta1 +{{- end }} +kind: APIService +metadata: +{{- if .Values.certManager.enabled }} + annotations: + certmanager.k8s.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "k8s-prometheus-adapter.fullname" .) | quote }} + cert-manager.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "k8s-prometheus-adapter.fullname" .) | quote }} +{{- end }} + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: v1beta1.external.metrics.k8s.io +spec: + service: + name: {{ template "k8s-prometheus-adapter.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + {{- if .Values.tls.enable }} + caBundle: {{ b64enc .Values.tls.ca }} + {{- end }} + group: external.metrics.k8s.io + version: v1beta1 + insecureSkipTLSVerify: {{ if or .Values.tls.enable .Values.certManager.enabled }}false{{ else }}true{{ end }} + groupPriorityMinimum: 100 + versionPriority: 100 +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/external-metrics-cluster-role-binding-hpa.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/external-metrics-cluster-role-binding-hpa.yaml new file mode 100755 index 000000000..0776029af --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/external-metrics-cluster-role-binding-hpa.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.rbac.create .Values.rules.external -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: {{ template "k8s-prometheus-adapter.name" . }}-hpa-controller-external-metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "k8s-prometheus-adapter.name" . 
}}-external-metrics +subjects: +- kind: ServiceAccount + name: horizontal-pod-autoscaler + namespace: kube-system +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/external-metrics-cluster-role.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/external-metrics-cluster-role.yaml new file mode 100755 index 000000000..4adbd6537 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/external-metrics-cluster-role.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.rbac.create .Values.rules.external -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: {{ template "k8s-prometheus-adapter.name" . }}-external-metrics +rules: +- apiGroups: + - "external.metrics.k8s.io" + resources: + - "*" + verbs: + - list + - get + - watch +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/pdb.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/pdb.yaml new file mode 100755 index 000000000..b70309f6f --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/pdb.yaml @@ -0,0 +1,22 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "k8s-prometheus-adapter.fullname" . }} + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + app: {{ template "k8s-prometheus-adapter.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/psp.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/psp.yaml new file mode 100755 index 000000000..a88c9c2f2 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/psp.yaml @@ -0,0 +1,68 @@ +{{- if .Values.psp.create -}} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "k8s-prometheus-adapter.fullname" . }} + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + {{- if .Values.hostNetwork.enabled }} + hostNetwork: true + {{- end }} + fsGroup: + rule: RunAsAny + runAsGroup: + rule: RunAsAny + runAsUser: + rule: MustRunAs + ranges: + - min: 1024 + max: 65535 + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + volumes: + - secret + - emptyDir + - configMap +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: {{ template "k8s-prometheus-adapter.name" . 
}}-psp +rules: +- apiGroups: + - 'policy' + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "k8s-prometheus-adapter.fullname" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: {{ template "k8s-prometheus-adapter.name" . }}-psp +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "k8s-prometheus-adapter.name" . }}-psp +subjects: +- kind: ServiceAccount + name: {{ template "k8s-prometheus-adapter.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/resource-metrics-apiservice.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/resource-metrics-apiservice.yaml new file mode 100755 index 000000000..ab75b2f6c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/resource-metrics-apiservice.yaml @@ -0,0 +1,32 @@ +{{- if .Values.rules.resource}} +{{- if .Capabilities.APIVersions.Has "apiregistration.k8s.io/v1" }} +apiVersion: apiregistration.k8s.io/v1 +{{- else }} +apiVersion: apiregistration.k8s.io/v1beta1 +{{- end }} +kind: APIService +metadata: +{{- if .Values.certManager.enabled }} + annotations: + certmanager.k8s.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "k8s-prometheus-adapter.fullname" .) | quote }} + cert-manager.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "k8s-prometheus-adapter.fullname" .) | quote }} +{{- end }} + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: v1beta1.metrics.k8s.io +spec: + service: + name: {{ template "k8s-prometheus-adapter.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + {{- if .Values.tls.enable }} + caBundle: {{ b64enc .Values.tls.ca }} + {{- end }} + group: metrics.k8s.io + version: v1beta1 + insecureSkipTLSVerify: {{ if or .Values.tls.enable .Values.certManager.enabled }}false{{ else }}true{{ end }} + groupPriorityMinimum: 100 + versionPriority: 100 +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/resource-metrics-cluster-role-binding.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/resource-metrics-cluster-role-binding.yaml new file mode 100755 index 000000000..0534af11e --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/resource-metrics-cluster-role-binding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.rbac.create .Values.rules.resource -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: {{ template "k8s-prometheus-adapter.name" . }}-hpa-controller-metrics +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "k8s-prometheus-adapter.name" . }}-metrics +subjects: +- kind: ServiceAccount + name: {{ template "k8s-prometheus-adapter.serviceAccountName" . 
}} + namespace: {{ .Release.Namespace | quote }} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/resource-metrics-cluster-role.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/resource-metrics-cluster-role.yaml new file mode 100755 index 000000000..01a307d69 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/resource-metrics-cluster-role.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.rbac.create .Values.rules.resource -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: {{ template "k8s-prometheus-adapter.name" . }}-metrics +rules: +- apiGroups: + - "" + resources: + - pods + - nodes + - nodes/stats + verbs: + - get + - list + - watch +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/role-binding-auth-reader.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/role-binding-auth-reader.yaml new file mode 100755 index 000000000..60f18f2b3 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/role-binding-auth-reader.yaml @@ -0,0 +1,20 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: {{ template "k8s-prometheus-adapter.name" . 
}}-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: +- kind: ServiceAccount + name: {{ template "k8s-prometheus-adapter.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/secret.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/secret.yaml new file mode 100755 index 000000000..38e7cb624 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/secret.yaml @@ -0,0 +1,15 @@ +{{- if .Values.tls.enable -}} +apiVersion: v1 +kind: Secret +metadata: + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: {{ template "k8s-prometheus-adapter.fullname" . }} +type: kubernetes.io/tls +data: + tls.crt: {{ b64enc .Values.tls.certificate }} + tls.key: {{ b64enc .Values.tls.key }} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/service.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/service.yaml new file mode 100755 index 000000000..6bccda911 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/service.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Service +metadata: + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: {{ template "k8s-prometheus-adapter.fullname" . 
}} + namespace: {{ .Release.Namespace }} +spec: + ports: + - port: {{ .Values.service.port }} + protocol: TCP + targetPort: https + selector: + app: {{ template "k8s-prometheus-adapter.name" . }} + release: {{ .Release.Name }} + type: {{ .Values.service.type }} + diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/serviceaccount.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/serviceaccount.yaml new file mode 100755 index 000000000..42ef0267e --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app: {{ template "k8s-prometheus-adapter.name" . }} + chart: {{ template "k8s-prometheus-adapter.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + name: {{ template "k8s-prometheus-adapter.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/values.yaml new file mode 100755 index 000000000..d9108cb9a --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-adapter/values.yaml @@ -0,0 +1,180 @@ +# Default values for k8s-prometheus-adapter.. 
+global: + cattle: + systemDefaultRegistry: "" + +affinity: {} + +image: + repository: rancher/mirrored-directxman12-k8s-prometheus-adapter + tag: v0.8.3 + pullPolicy: IfNotPresent + +logLevel: 4 + +metricsRelistInterval: 1m + +listenPort: 6443 + +nodeSelector: {} + +priorityClassName: "" + +# Url to access prometheus +prometheus: + # Value is templated + url: http://prometheus.default.svc + port: 9090 + path: "" + +replicas: 1 + +rbac: + # Specifies whether RBAC resources should be created + create: true + +psp: + # Specifies whether PSP resources should be created + create: false + +serviceAccount: + # Specifies whether a service account should be created + create: true + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: +# Custom DNS configuration to be added to prometheus-adapter pods +dnsConfig: {} +# nameservers: +# - 1.2.3.4 +# searches: +# - ns1.svc.cluster-domain.example +# - my.dns.search.suffix +# options: +# - name: ndots +# value: "2" +# - name: edns0 +resources: {} + # requests: + # cpu: 100m + # memory: 128Mi + # limits: + # cpu: 100m + # memory: 128Mi + +rules: + default: true + custom: [] +# - seriesQuery: '{__name__=~"^some_metric_count$"}' +# resources: +# template: <<.Resource>> +# name: +# matches: "" +# as: "my_custom_metric" +# metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>) + # Mounts a configMap with pre-generated rules for use. 
Overrides the + # default, custom, external and resource entries + existing: + external: [] +# - seriesQuery: '{__name__=~"^some_metric_count$"}' +# resources: +# template: <<.Resource>> +# name: +# matches: "" +# as: "my_external_metric" +# metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>) + resource: {} +# cpu: +# containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>}[3m])) by (<<.GroupBy>>) +# nodeQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>, id='/'}[3m])) by (<<.GroupBy>>) +# resources: +# overrides: +# instance: +# resource: node +# namespace: +# resource: namespace +# pod: +# resource: pod +# containerLabel: container +# memory: +# containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>) +# nodeQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>,id='/'}) by (<<.GroupBy>>) +# resources: +# overrides: +# instance: +# resource: node +# namespace: +# resource: namespace +# pod: +# resource: pod +# containerLabel: container +# window: 3m + +service: + annotations: {} + port: 443 + type: ClusterIP + +tls: + enable: false + ca: |- + # Public CA file that signed the APIService + key: |- + # Private key of the APIService + certificate: |- + # Public key of the APIService + +# Any extra arguments +extraArguments: [] + # - --tls-private-key-file=/etc/tls/tls.key + # - --tls-cert-file=/etc/tls/tls.crt + +# Any extra volumes +extraVolumes: [] + # - name: example-name + # hostPath: + # path: /path/on/host + # type: DirectoryOrCreate + # - name: ssl-certs + # hostPath: + # path: /etc/ssl/certs/ca-bundle.crt + # type: File + +# Any extra volume mounts +extraVolumeMounts: [] + # - name: example-name + # mountPath: /path/in/container + # - name: ssl-certs + # mountPath: /etc/ssl/certs/ca-certificates.crt + # readOnly: true + +tolerations: [] + +# Labels added to the pod +podLabels: {} + +# Annotations added to the pod +podAnnotations: {} + +hostNetwork: + # 
Specifies if prometheus-adapter should be started in hostNetwork mode. + # + # You would require this enabled if you use alternate overlay networking for pods and + # API server unable to communicate with metrics-server. As an example, this is required + # if you use Weave network on EKS. See also dnsPolicy + enabled: false + +# When hostNetwork is enabled, you probably want to set this to ClusterFirstWithHostNet +# dnsPolicy: ClusterFirstWithHostNet + +podDisruptionBudget: + # Specifies if PodDisruptionBudget should be enabled + # When enabled, minAvailable or maxUnavailable should also be defined. + enabled: false + minAvailable: + maxUnavailable: 1 + +certManager: + enabled: false + caCertDuration: 43800h + certDuration: 8760h diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/.helmignore b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/.helmignore new file mode 100755 index 000000000..f0c131944 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/Chart.yaml new file mode 100755 index 000000000..887a6d5da --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-node-exporter +apiVersion: v1 +appVersion: 1.1.2 +description: A Helm chart for prometheus node-exporter +home: https://github.com/prometheus/node_exporter/ +keywords: +- node-exporter +- prometheus +- exporter +maintainers: +- email: gianrubio@gmail.com + name: gianrubio +- name: vsliouniaev +- name: bismarck +name: prometheus-node-exporter +sources: +- https://github.com/prometheus/node_exporter/ +version: 1.16.2 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/README.md new file mode 100755 index 000000000..babde05e0 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/README.md @@ -0,0 +1,63 @@ +# Prometheus Node Exporter + +Prometheus exporter for hardware and OS metrics exposed by *NIX kernels, written in Go with pluggable metric collectors. + +This chart bootstraps a prometheus [Node Exporter](http://github.com/prometheus/node_exporter) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 
+ +## Get Repo Info + +```console +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm repo update +``` + +_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Install Chart + +```console +# Helm 3 +$ helm install [RELEASE_NAME] prometheus-community/prometheus-node-exporter + +# Helm 2 +$ helm install --name [RELEASE_NAME] prometheus-community/prometheus-node-exporter +``` + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Uninstall Chart + +```console +# Helm 3 +$ helm uninstall [RELEASE_NAME] + +# Helm 2 +# helm delete --purge [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. + +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +# Helm 3 or 2 +$ helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +## Configuring + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). 
To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +# Helm 2 +$ helm inspect values prometheus-community/prometheus-node-exporter + +# Helm 3 +$ helm show values prometheus-community/prometheus-node-exporter +``` diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/NOTES.txt b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/NOTES.txt new file mode 100755 index 000000000..dc272fa99 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/NOTES.txt @@ -0,0 +1,15 @@ +1. Get the application URL by running these commands: +{{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ template "prometheus-node-exporter.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus-node-exporter.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ template "prometheus-node-exporter.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc -w {{ template "prometheus-node-exporter.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ template "prometheus-node-exporter.namespace" . }} {{ template "prometheus-node-exporter.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ template "prometheus-node-exporter.namespace" . }} -l "app={{ template "prometheus-node-exporter.name" . 
}},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + echo "Visit http://127.0.0.1:9100 to use your application" + kubectl port-forward --namespace {{ template "prometheus-node-exporter.namespace" . }} $POD_NAME 9100 +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/_helpers.tpl new file mode 100755 index 000000000..9fd0d600b --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/_helpers.tpl @@ -0,0 +1,95 @@ +# Rancher +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "prometheus-node-exporter.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "prometheus-node-exporter.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* Generate basic labels */}} +{{- define "prometheus-node-exporter.labels" }} +app: {{ template "prometheus-node-exporter.name" . }} +heritage: {{.Release.Service }} +release: {{.Release.Name }} +chart: {{ template "prometheus-node-exporter.chart" . }} +{{- if .Values.podLabels}} +{{ toYaml .Values.podLabels }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "prometheus-node-exporter.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + + +{{/* +Create the name of the service account to use +*/}} +{{- define "prometheus-node-exporter.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "prometheus-node-exporter.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "prometheus-node-exporter.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/daemonset.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/daemonset.yaml new file mode 100755 index 000000000..a3a1bc885 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/daemonset.yaml @@ -0,0 +1,183 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "prometheus-node-exporter.fullname" . }} + namespace: {{ template "prometheus-node-exporter.namespace" . }} + labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }} +spec: + selector: + matchLabels: + app: {{ template "prometheus-node-exporter.name" . }} + release: {{ .Release.Name }} + {{- if .Values.updateStrategy }} + updateStrategy: +{{ toYaml .Values.updateStrategy | indent 4 }} + {{- end }} + template: + metadata: + labels: {{ include "prometheus-node-exporter.labels" . | indent 8 }} + {{- if .Values.podAnnotations }} + annotations: + {{- toYaml .Values.podAnnotations | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "prometheus-node-exporter.serviceAccountName" . }} +{{- if .Values.securityContext }} + securityContext: +{{ toYaml .Values.securityContext | indent 8 }} +{{- end }} +{{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} +{{- end }} + containers: + - name: node-exporter + image: "{{ template "system_default_registry" . 
}}{{ .Values.image.repository }}:{{ .Values.image.tag }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + {{- if .Values.hostRootFsMount }} + - --path.rootfs=/host/root + {{- end }} + - --web.listen-address=$(HOST_IP):{{ .Values.service.port }} +{{- if .Values.extraArgs }} +{{ toYaml .Values.extraArgs | indent 12 }} +{{- end }} + {{- with .Values.containerSecurityContext }} + securityContext: {{ toYaml . | nindent 12 }} + {{- end }} + env: + - name: HOST_IP + {{- if .Values.service.listenOnAllInterfaces }} + value: 0.0.0.0 + {{- else }} + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.hostIP + {{- end }} + ports: + - name: metrics + containerPort: {{ .Values.service.port }} + protocol: TCP + livenessProbe: + httpGet: + path: / + port: {{ .Values.service.port }} + readinessProbe: + httpGet: + path: / + port: {{ .Values.service.port }} + resources: +{{ toYaml .Values.resources | indent 12 }} + volumeMounts: + - name: proc + mountPath: /host/proc + readOnly: true + - name: sys + mountPath: /host/sys + readOnly: true + {{- if .Values.hostRootFsMount }} + - name: root + mountPath: /host/root + mountPropagation: HostToContainer + readOnly: true + {{- end }} + {{- if .Values.extraHostVolumeMounts }} + {{- range $_, $mount := .Values.extraHostVolumeMounts }} + - name: {{ $mount.name }} + mountPath: {{ $mount.mountPath }} + readOnly: {{ $mount.readOnly }} + {{- if $mount.mountPropagation }} + mountPropagation: {{ $mount.mountPropagation }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.sidecarVolumeMount }} + {{- range $_, $mount := .Values.sidecarVolumeMount }} + - name: {{ $mount.name }} + mountPath: {{ $mount.mountPath }} + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.configmaps }} + {{- range $_, $mount := .Values.configmaps }} + - name: {{ $mount.name }} + mountPath: {{ $mount.mountPath }} + {{- end }} + {{- if .Values.secrets }} + {{- range $_, $mount := 
.Values.secrets }}
+            - name: {{ .name }}
+              mountPath: {{ .mountPath }}
+            {{- end }}
+            {{- end }}
+            {{- end }}
+{{- if .Values.sidecars }}
+{{ toYaml .Values.sidecars | indent 8 }}
+  {{- if .Values.sidecarVolumeMount }}
+          volumeMounts:
+    {{- range $_, $mount := .Values.sidecarVolumeMount }}
+            - name: {{ $mount.name }}
+              mountPath: {{ $mount.mountPath }}
+              readOnly: {{ $mount.readOnly }}
+    {{- end }}
+  {{- end }}
+{{- end }}
+      hostNetwork: {{ .Values.hostNetwork }}
+      hostPID: true
+{{- if .Values.affinity }}
+      affinity:
+{{ toYaml .Values.affinity | indent 8 }}
+{{- end }}
+      nodeSelector: {{ include "linux-node-selector" . | nindent 8 }}
+{{- with .Values.dnsConfig }}
+      dnsConfig:
+{{ toYaml . | indent 8 }}
+{{- end }}
+{{- if .Values.nodeSelector }}
+{{- toYaml .Values.nodeSelector | nindent 8 }}
+{{- end }}
+      tolerations: {{ include "linux-node-tolerations" . | nindent 8 }}
+{{- if .Values.tolerations }}
+{{- toYaml .Values.tolerations | nindent 8 }}
+{{- end }}
+      volumes:
+        - name: proc
+          hostPath:
+            path: /proc
+        - name: sys
+          hostPath:
+            path: /sys
+        {{- if .Values.hostRootFsMount }}
+        - name: root
+          hostPath:
+            path: /
+        {{- end }}
+        {{- if .Values.extraHostVolumeMounts }}
+        {{- range $_, $mount := .Values.extraHostVolumeMounts }}
+        - name: {{ $mount.name }}
+          hostPath:
+            path: {{ $mount.hostPath }}
+        {{- end }}
+        {{- end }}
+        {{- if .Values.sidecarVolumeMount }}
+        {{- range $_, $mount := .Values.sidecarVolumeMount }}
+        - name: {{ $mount.name }}
+          emptyDir:
+            medium: Memory
+        {{- end }}
+        {{- end }}
+        {{- if .Values.configmaps }}
+        {{- range $_, $mount := .Values.configmaps }}
+        - name: {{ $mount.name }}
+          configMap:
+            name: {{ $mount.name }}
+        {{- end }}
+        {{- end }}
+        {{- if .Values.secrets }}
+        {{- range $_, $mount := .Values.secrets }}
+        - name: {{ $mount.name }}
+          secret:
+            secretName: {{ $mount.name }}
+        {{- end }}
+        {{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/endpoints.yaml 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/endpoints.yaml new file mode 100755 index 000000000..8daaeaaff --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/endpoints.yaml @@ -0,0 +1,18 @@ +{{- if .Values.endpoints }} +apiVersion: v1 +kind: Endpoints +metadata: + name: {{ template "prometheus-node-exporter.fullname" . }} + namespace: {{ template "prometheus-node-exporter.namespace" . }} + labels: +{{ include "prometheus-node-exporter.labels" . | indent 4 }} +subsets: + - addresses: + {{- range .Values.endpoints }} + - ip: {{ . }} + {{- end }} + ports: + - name: metrics + port: 9100 + protocol: TCP +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/monitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/monitor.yaml new file mode 100755 index 000000000..2f7b6ae9e --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/monitor.yaml @@ -0,0 +1,32 @@ +{{- if .Values.prometheus.monitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "prometheus-node-exporter.fullname" . }} + namespace: {{ template "prometheus-node-exporter.namespace" . }} + labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }} + {{- if .Values.prometheus.monitor.additionalLabels }} +{{ toYaml .Values.prometheus.monitor.additionalLabels | indent 4 }} + {{- end }} +spec: + selector: + matchLabels: + app: {{ template "prometheus-node-exporter.name" . 
}} + release: {{ .Release.Name }} + endpoints: + - port: metrics + scheme: {{ $.Values.prometheus.monitor.scheme }} + {{- if $.Values.prometheus.monitor.bearerTokenFile }} + bearerTokenFile: {{ $.Values.prometheus.monitor.bearerTokenFile }} + {{- end }} + {{- if $.Values.prometheus.monitor.tlsConfig }} + tlsConfig: {{ toYaml $.Values.prometheus.monitor.tlsConfig | nindent 8 }} + {{- end }} + {{- if .Values.prometheus.monitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.prometheus.monitor.scrapeTimeout }} + {{- end }} +{{- if .Values.prometheus.monitor.relabelings }} + relabelings: +{{ toYaml .Values.prometheus.monitor.relabelings | indent 6 }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml new file mode 100755 index 000000000..cb433369c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/psp-clusterrole.yaml @@ -0,0 +1,15 @@ +{{- if .Values.rbac.create }} +{{- if .Values.rbac.pspEnabled }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: psp-{{ template "prometheus-node-exporter.fullname" . }} + labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }} +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "prometheus-node-exporter.fullname" . 
}} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml new file mode 100755 index 000000000..d36d93ecf --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/psp-clusterrolebinding.yaml @@ -0,0 +1,17 @@ +{{- if .Values.rbac.create }} +{{- if .Values.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: psp-{{ template "prometheus-node-exporter.fullname" . }} + labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: psp-{{ template "prometheus-node-exporter.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus-node-exporter.fullname" . }} + namespace: {{ template "prometheus-node-exporter.namespace" . }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/psp.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/psp.yaml new file mode 100755 index 000000000..f00506c98 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/psp.yaml @@ -0,0 +1,52 @@ +{{- if .Values.rbac.create }} +{{- if .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus-node-exporter.fullname" . }} + namespace: {{ template "prometheus-node-exporter.namespace" . }} + labels: {{ include "prometheus-node-exporter.labels" . | indent 4 }} +spec: + privileged: false + # Required to prevent escalations to root. 
+ # allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + #requiredDropCapabilities: + # - ALL + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + - 'hostPath' + hostNetwork: true + hostIPC: false + hostPID: true + hostPorts: + - min: 0 + max: 65535 + runAsUser: + # Permits the container to run with root privileges as well. + rule: 'RunAsAny' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 0 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/service.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/service.yaml new file mode 100755 index 000000000..b0a447fe3 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/service.yaml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "prometheus-node-exporter.fullname" . }} + namespace: {{ template "prometheus-node-exporter.namespace" . }} +{{- if .Values.service.annotations }} + annotations: +{{ toYaml .Values.service.annotations | indent 4 }} +{{- end }} + labels: {{ include "prometheus-node-exporter.labels" . 
| indent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + {{- if ( and (eq .Values.service.type "NodePort" ) (not (empty .Values.service.nodePort)) ) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: metrics + selector: + app: {{ template "prometheus-node-exporter.name" . }} + release: {{ .Release.Name }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/serviceaccount.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/serviceaccount.yaml new file mode 100755 index 000000000..07e9f0d94 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/templates/serviceaccount.yaml @@ -0,0 +1,18 @@ +{{- if .Values.rbac.create -}} +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "prometheus-node-exporter.serviceAccountName" . }} + namespace: {{ template "prometheus-node-exporter.namespace" . }} + labels: + app: {{ template "prometheus-node-exporter.name" . }} + chart: {{ template "prometheus-node-exporter.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" + annotations: +{{ toYaml .Values.serviceAccount.annotations | indent 4 }} +imagePullSecrets: +{{ toYaml .Values.serviceAccount.imagePullSecrets | indent 2 }} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/values.yaml new file mode 100755 index 000000000..47dedd4d2 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/prometheus-node-exporter/values.yaml @@ -0,0 +1,177 @@ +# Default values for prometheus-node-exporter. +# This is a YAML-formatted file. 
+# Declare variables to be passed into your templates. + +global: + cattle: + systemDefaultRegistry: "" + +image: + repository: rancher/mirrored-prometheus-node-exporter + tag: v1.1.2 + pullPolicy: IfNotPresent + +service: + type: ClusterIP + port: 9100 + targetPort: 9100 + nodePort: + listenOnAllInterfaces: true + annotations: + prometheus.io/scrape: "true" + +prometheus: + monitor: + enabled: false + additionalLabels: {} + namespace: "" + scheme: http + bearerTokenFile: + tlsConfig: {} + + relabelings: [] + scrapeTimeout: 10s + +## Customize the updateStrategy if set +updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 200m + # memory: 50Mi + # requests: + # cpu: 100m + # memory: 30Mi + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: + annotations: {} + imagePullSecrets: [] + +securityContext: + fsGroup: 65534 + runAsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + +containerSecurityContext: {} + # capabilities: + # add: + # - SYS_TIME + +rbac: + ## If true, create & use RBAC resources + ## + create: true + ## If true, create & use Pod Security Policy resources + ## https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + pspEnabled: true + +# for deployments that have node_exporter deployed outside of the cluster, list +# their addresses here +endpoints: [] + +# Expose the service to the host network +hostNetwork: true + +## If true, node-exporter pods mounts host / at /host/root +## +hostRootFsMount: true + +## Assign a group of affinity scheduling rules +## +affinity: {} +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchFields: +# - key: metadata.name +# operator: In +# values: +# - target-host-name + +# Annotations to be added to node exporter pods +podAnnotations: + # Fix for very slow GKE cluster upgrades + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + +# Extra labels to be added to node exporter pods +podLabels: {} + +# Custom DNS configuration to be added to prometheus-node-exporter pods +dnsConfig: {} +# nameservers: +# - 1.2.3.4 +# searches: +# - ns1.svc.cluster-domain.example +# - my.dns.search.suffix +# options: +# - name: ndots +# value: "2" +# - name: edns0 + +## Assign a nodeSelector if operating a hybrid cluster +## +nodeSelector: {} +# beta.kubernetes.io/arch: amd64 +# beta.kubernetes.io/os: linux + +tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + +## Assign a PriorityClassName to pods if set +# priorityClassName: "" + +## Additional container arguments +## +extraArgs: [] +# - --collector.diskstats.ignored-devices=^(ram|loop|fd|(h|s|v)d[a-z]|nvme\\d+n\\d+p)\\d+$ +# 
- --collector.textfile.directory=/run/prometheus + +## Additional mounts from the host +## +extraHostVolumeMounts: [] +# - name: +# hostPath: +# mountPath: +# readOnly: true|false +# mountPropagation: None|HostToContainer|Bidirectional + +## Additional configmaps to be mounted. +## +configmaps: [] +# - name: +# mountPath: +secrets: [] +# - name: +# mountPath: +## Override the deployment namespace +## +namespaceOverride: "" + +## Additional containers for export metrics to text file +## +sidecars: [] +## - name: nvidia-dcgm-exporter +## image: nvidia/dcgm-exporter:1.4.3 + +## Volume for sidecar containers +## +sidecarVolumeMount: [] +## - name: collector-textfiles +## mountPath: /run/prometheus +## readOnly: false diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/.helmignore b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/.helmignore new file mode 100755 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/Chart.yaml new file mode 100755 index 000000000..e5205567e --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/Chart.yaml @@ -0,0 +1,13 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-pushprox +apiVersion: v1 +appVersion: 0.1.0 +description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx + clients. +name: rke2ControllerManager +type: application +version: 0.1.3 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/README.md new file mode 100755 index 000000000..dcecc69da --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/README.md @@ -0,0 +1,54 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. 
+ +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. + +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. 
If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. 
Required and only used if `clients.https.enabled` is set | `""` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. + +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. 
\ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/_helpers.tpl new file mode 100755 index 000000000..f77b8edf4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/_helpers.tpl @@ -0,0 +1,87 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) 
.Release.Namespace (int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-clients-rbac.yaml new file mode 100755 index 000000000..95346dee6 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,74 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . 
}} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-clients.yaml new file mode 100755 index 000000000..ed78792e5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-clients.yaml @@ -0,0 +1,135 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + 
runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-proxy-rbac.yaml new file mode 100755 index 000000000..a3509c160 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-proxy.yaml new file mode 100755 index 000000000..571e13138 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-proxy.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} + pushprox-exporter: "proxy" +spec: + selector: + matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.proxy.labels" . | nindent 8 }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.proxy.nodeSelector }} +{{ toYaml .Values.proxy.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.proxy.tolerations }} +{{ toYaml .Values.proxy.tolerations | indent 8 }} +{{- end }} + serviceAccountName: {{ template "pushProxy.proxy.name" . 
}} + containers: + - name: pushprox-proxy + image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }} + command: + {{- range .Values.proxy.command }} + - {{ . | quote }} + {{- end }} + {{- if .Values.proxy.resources }} + resources: {{ toYaml .Values.proxy.resources | nindent 10 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + ports: + - name: pp-proxy + port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }} + protocol: TCP + targetPort: {{ .Values.proxy.port }} + selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-servicemonitor.yaml new file mode 100755 index 000000000..2f3d7e54c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,39 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: + - port: metrics + proxyUrl: {{ template "pushProxy.proxyUrl" . }} + {{- if .Values.clients.https.enabled }} + params: + _scheme: [https] + {{- end }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . 
}} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/values.yaml new file mode 100755 index 000000000..e1bcf79a5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2ControllerManager/values.yaml @@ -0,0 +1,86 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +# The component that is being monitored (i.e. 
etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher1-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # 
Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher1-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/.helmignore b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/.helmignore new file mode 100755 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/Chart.yaml new file mode 100755 index 000000000..7320aec04 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/Chart.yaml @@ -0,0 +1,13 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-pushprox +apiVersion: v1 +appVersion: 0.1.0 +description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx + clients. 
+name: rke2Etcd +type: application +version: 0.1.3 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/README.md new file mode 100755 index 000000000..dcecc69da --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/README.md @@ -0,0 +1,54 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. 
+ +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. 
The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. 
+ +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/_helpers.tpl new file mode 100755 index 000000000..f77b8edf4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/_helpers.tpl @@ -0,0 +1,87 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include 
"pushProxy.proxy.name" .) .Release.Namespace (int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-clients-rbac.yaml new file mode 100755 index 000000000..95346dee6 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,74 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . 
}} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-clients.yaml new file mode 100755 index 000000000..ed78792e5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-clients.yaml @@ -0,0 +1,135 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + 
runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-proxy-rbac.yaml new file mode 100755 index 000000000..a3509c160 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-proxy.yaml new file mode 100755 index 000000000..571e13138 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-proxy.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} + pushprox-exporter: "proxy" +spec: + selector: + matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.proxy.labels" . | nindent 8 }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.proxy.nodeSelector }} +{{ toYaml .Values.proxy.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.proxy.tolerations }} +{{ toYaml .Values.proxy.tolerations | indent 8 }} +{{- end }} + serviceAccountName: {{ template "pushProxy.proxy.name" . 
}} + containers: + - name: pushprox-proxy + image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }} + command: + {{- range .Values.proxy.command }} + - {{ . | quote }} + {{- end }} + {{- if .Values.proxy.resources }} + resources: {{ toYaml .Values.proxy.resources | nindent 10 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + ports: + - name: pp-proxy + port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }} + protocol: TCP + targetPort: {{ .Values.proxy.port }} + selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-servicemonitor.yaml new file mode 100755 index 000000000..2f3d7e54c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,39 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: + - port: metrics + proxyUrl: {{ template "pushProxy.proxyUrl" . }} + {{- if .Values.clients.https.enabled }} + params: + _scheme: [https] + {{- end }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . 
| nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/values.yaml new file mode 100755 index 000000000..e1bcf79a5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Etcd/values.yaml @@ -0,0 +1,86 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +# The component that is being monitored (i.e. 
etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher1-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # 
Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher1-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/.helmignore b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/.helmignore new file mode 100755 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/Chart.yaml new file mode 100755 index 000000000..1e2201169 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/Chart.yaml @@ -0,0 +1,13 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-pushprox +apiVersion: v1 +appVersion: 0.1.0 +description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx + clients. 
+name: rke2Proxy +type: application +version: 0.1.3 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/README.md new file mode 100755 index 000000000..dcecc69da --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/README.md @@ -0,0 +1,54 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. 
+ +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. 
The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. 
+ +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/_helpers.tpl new file mode 100755 index 000000000..f77b8edf4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/_helpers.tpl @@ -0,0 +1,87 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include 
"pushProxy.proxy.name" .) .Release.Namespace (int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-clients-rbac.yaml new file mode 100755 index 000000000..95346dee6 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,74 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . 
}} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-clients.yaml new file mode 100755 index 000000000..ed78792e5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-clients.yaml @@ -0,0 +1,135 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + 
runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-proxy-rbac.yaml new file mode 100755 index 000000000..a3509c160 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-proxy.yaml new file mode 100755 index 000000000..571e13138 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-proxy.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} + pushprox-exporter: "proxy" +spec: + selector: + matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.proxy.labels" . | nindent 8 }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.proxy.nodeSelector }} +{{ toYaml .Values.proxy.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.proxy.tolerations }} +{{ toYaml .Values.proxy.tolerations | indent 8 }} +{{- end }} + serviceAccountName: {{ template "pushProxy.proxy.name" . 
}} + containers: + - name: pushprox-proxy + image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }} + command: + {{- range .Values.proxy.command }} + - {{ . | quote }} + {{- end }} + {{- if .Values.proxy.resources }} + resources: {{ toYaml .Values.proxy.resources | nindent 10 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + ports: + - name: pp-proxy + port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }} + protocol: TCP + targetPort: {{ .Values.proxy.port }} + selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-servicemonitor.yaml new file mode 100755 index 000000000..2f3d7e54c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,39 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: + - port: metrics + proxyUrl: {{ template "pushProxy.proxyUrl" . }} + {{- if .Values.clients.https.enabled }} + params: + _scheme: [https] + {{- end }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . 
| nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/values.yaml new file mode 100755 index 000000000..e1bcf79a5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Proxy/values.yaml @@ -0,0 +1,86 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +# The component that is being monitored (i.e. 
etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher1-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # 
Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher1-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/.helmignore b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/.helmignore new file mode 100755 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/Chart.yaml new file mode 100755 index 000000000..4b076fb84 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/Chart.yaml @@ -0,0 +1,13 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-pushprox +apiVersion: v1 +appVersion: 0.1.0 +description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx + clients. 
+name: rke2Scheduler +type: application +version: 0.1.3 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/README.md new file mode 100755 index 000000000..dcecc69da --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/README.md @@ -0,0 +1,54 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. 
+ +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. 
The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. 
+ +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/_helpers.tpl new file mode 100755 index 000000000..f77b8edf4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/_helpers.tpl @@ -0,0 +1,87 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include 
"pushProxy.proxy.name" .) .Release.Namespace (int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-clients-rbac.yaml new file mode 100755 index 000000000..95346dee6 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,74 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . 
}} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-clients.yaml new file mode 100755 index 000000000..ed78792e5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-clients.yaml @@ -0,0 +1,135 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + 
runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-proxy-rbac.yaml new file mode 100755 index 000000000..a3509c160 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-proxy.yaml new file mode 100755 index 000000000..571e13138 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-proxy.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} + pushprox-exporter: "proxy" +spec: + selector: + matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.proxy.labels" . | nindent 8 }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.proxy.nodeSelector }} +{{ toYaml .Values.proxy.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.proxy.tolerations }} +{{ toYaml .Values.proxy.tolerations | indent 8 }} +{{- end }} + serviceAccountName: {{ template "pushProxy.proxy.name" . 
}} + containers: + - name: pushprox-proxy + image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }} + command: + {{- range .Values.proxy.command }} + - {{ . | quote }} + {{- end }} + {{- if .Values.proxy.resources }} + resources: {{ toYaml .Values.proxy.resources | nindent 10 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + ports: + - name: pp-proxy + port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }} + protocol: TCP + targetPort: {{ .Values.proxy.port }} + selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-servicemonitor.yaml new file mode 100755 index 000000000..2f3d7e54c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,39 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: + - port: metrics + proxyUrl: {{ template "pushProxy.proxyUrl" . }} + {{- if .Values.clients.https.enabled }} + params: + _scheme: [https] + {{- end }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . 
}} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/values.yaml new file mode 100755 index 000000000..e1bcf79a5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rke2Scheduler/values.yaml @@ -0,0 +1,86 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +# The component that is being monitored (i.e. 
etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher1-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # 
Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher1-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/.helmignore b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/.helmignore new file mode 100755 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/Chart.yaml new file mode 100755 index 000000000..09ef21031 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/Chart.yaml @@ -0,0 +1,13 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-pushprox +apiVersion: v1 +appVersion: 0.1.0 +description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx + clients. 
+name: rkeControllerManager +type: application +version: 0.1.3 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/README.md new file mode 100755 index 000000000..dcecc69da --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/README.md @@ -0,0 +1,54 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. 
+ +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. + +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. 
The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. 
+ +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/_helpers.tpl new file mode 100755 index 000000000..f77b8edf4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/_helpers.tpl @@ -0,0 +1,87 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf 
"http://%s.%s.svc:%d" (include "pushProxy.proxy.name" .) .Release.Namespace (int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-clients-rbac.yaml new file mode 100755 index 000000000..95346dee6 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,74 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . }} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-clients.yaml new file mode 100755 index 000000000..ed78792e5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-clients.yaml @@ -0,0 +1,135 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + 
runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-proxy-rbac.yaml new file mode 100755 index 000000000..a3509c160 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-proxy.yaml new file mode 100755 index 000000000..571e13138 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-proxy.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} + pushprox-exporter: "proxy" +spec: + selector: + matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.proxy.labels" . | nindent 8 }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.proxy.nodeSelector }} +{{ toYaml .Values.proxy.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.proxy.tolerations }} +{{ toYaml .Values.proxy.tolerations | indent 8 }} +{{- end }} + serviceAccountName: {{ template "pushProxy.proxy.name" . 
}} + containers: + - name: pushprox-proxy + image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }} + command: + {{- range .Values.proxy.command }} + - {{ . | quote }} + {{- end }} + {{- if .Values.proxy.resources }} + resources: {{ toYaml .Values.proxy.resources | nindent 10 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + ports: + - name: pp-proxy + port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }} + protocol: TCP + targetPort: {{ .Values.proxy.port }} + selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-servicemonitor.yaml new file mode 100755 index 000000000..2f3d7e54c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,39 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: + - port: metrics + proxyUrl: {{ template "pushProxy.proxyUrl" . }} + {{- if .Values.clients.https.enabled }} + params: + _scheme: [https] + {{- end }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . 
}} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/values.yaml new file mode 100755 index 000000000..e1bcf79a5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeControllerManager/values.yaml @@ -0,0 +1,86 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +# The component that is being monitored (i.e. 
etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher1-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # 
Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher1-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/.helmignore b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/.helmignore new file mode 100755 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/Chart.yaml new file mode 100755 index 000000000..a4f4b02e4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/Chart.yaml @@ -0,0 +1,13 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-pushprox +apiVersion: v1 +appVersion: 0.1.0 +description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx + clients. 
+name: rkeEtcd +type: application +version: 0.1.3 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/README.md new file mode 100755 index 000000000..dcecc69da --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/README.md @@ -0,0 +1,54 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. 
+ +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://:/metrics`) | `2379` | + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ . Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}"` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. 
The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.File` can include wildcard characters*. 
+ +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/_helpers.tpl new file mode 100755 index 000000000..f77b8edf4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/_helpers.tpl @@ -0,0 +1,87 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include 
"pushProxy.proxy.name" .) .Release.Namespace (int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-clients-rbac.yaml new file mode 100755 index 000000000..95346dee6 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,74 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . 
}} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-clients.yaml new file mode 100755 index 000000000..ed78792e5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-clients.yaml @@ -0,0 +1,135 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + 
runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-proxy-rbac.yaml new file mode 100755 index 000000000..a3509c160 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-proxy.yaml new file mode 100755 index 000000000..571e13138 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-proxy.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} + pushprox-exporter: "proxy" +spec: + selector: + matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.proxy.labels" . | nindent 8 }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.proxy.nodeSelector }} +{{ toYaml .Values.proxy.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.proxy.tolerations }} +{{ toYaml .Values.proxy.tolerations | indent 8 }} +{{- end }} + serviceAccountName: {{ template "pushProxy.proxy.name" . 
}} + containers: + - name: pushprox-proxy + image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }} + command: + {{- range .Values.proxy.command }} + - {{ . | quote }} + {{- end }} + {{- if .Values.proxy.resources }} + resources: {{ toYaml .Values.proxy.resources | nindent 10 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + ports: + - name: pp-proxy + port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }} + protocol: TCP + targetPort: {{ .Values.proxy.port }} + selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-servicemonitor.yaml new file mode 100755 index 000000000..2f3d7e54c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,39 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: + - port: metrics + proxyUrl: {{ template "pushProxy.proxyUrl" . }} + {{- if .Values.clients.https.enabled }} + params: + _scheme: [https] + {{- end }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . 
| nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/values.yaml new file mode 100755 index 000000000..e1bcf79a5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeEtcd/values.yaml @@ -0,0 +1,86 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +# The component that is being monitored (i.e. 
etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher1-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # 
Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher1-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/.helmignore b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/.helmignore new file mode 100755 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/Chart.yaml new file mode 100755 index 000000000..f86115b68 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/Chart.yaml @@ -0,0 +1,13 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-pushprox +apiVersion: v1 +appVersion: 0.1.0 +description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx + clients. 
+name: rkeProxy +type: application +version: 0.1.3 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/README.md new file mode 100755 index 000000000..dcecc69da --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/README.md @@ -0,0 +1,54 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. 
+ +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://<HOST_IP>:<metricsPort>/metrics`) | `2379` | + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components. 
The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.<cert|key|caCert>File` can include wildcard characters*. 
+ +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/_helpers.tpl new file mode 100755 index 000000000..f77b8edf4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/_helpers.tpl @@ -0,0 +1,87 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include 
"pushProxy.proxy.name" .) .Release.Namespace (int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-clients-rbac.yaml new file mode 100755 index 000000000..95346dee6 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,74 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . 
}} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-clients.yaml new file mode 100755 index 000000000..ed78792e5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-clients.yaml @@ -0,0 +1,135 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + 
runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-proxy-rbac.yaml new file mode 100755 index 000000000..a3509c160 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-proxy.yaml new file mode 100755 index 000000000..571e13138 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-proxy.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} + pushprox-exporter: "proxy" +spec: + selector: + matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.proxy.labels" . | nindent 8 }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.proxy.nodeSelector }} +{{ toYaml .Values.proxy.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.proxy.tolerations }} +{{ toYaml .Values.proxy.tolerations | indent 8 }} +{{- end }} + serviceAccountName: {{ template "pushProxy.proxy.name" . 
}} + containers: + - name: pushprox-proxy + image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }} + command: + {{- range .Values.proxy.command }} + - {{ . | quote }} + {{- end }} + {{- if .Values.proxy.resources }} + resources: {{ toYaml .Values.proxy.resources | nindent 10 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + ports: + - name: pp-proxy + port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }} + protocol: TCP + targetPort: {{ .Values.proxy.port }} + selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-servicemonitor.yaml new file mode 100755 index 000000000..2f3d7e54c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,39 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: + - port: metrics + proxyUrl: {{ template "pushProxy.proxyUrl" . }} + {{- if .Values.clients.https.enabled }} + params: + _scheme: [https] + {{- end }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . }} + selector: + matchLabels: {{ include "pushProxy.client.labels" . 
| nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/values.yaml new file mode 100755 index 000000000..e1bcf79a5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeProxy/values.yaml @@ -0,0 +1,86 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +# The component that is being monitored (i.e. 
etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher1-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # 
Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher1-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/.helmignore b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/.helmignore new file mode 100755 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/Chart.yaml new file mode 100755 index 000000000..9b58f56a5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/Chart.yaml @@ -0,0 +1,13 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: linux + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-pushprox +apiVersion: v1 +appVersion: 0.1.0 +description: Sets up a deployment of the PushProx proxy and a DaemonSet of PushProx + clients. 
+name: rkeScheduler +type: application +version: 0.1.3 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/README.md b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/README.md new file mode 100755 index 000000000..dcecc69da --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/README.md @@ -0,0 +1,54 @@ +# rancher-pushprox + +A Rancher chart based on Rancher [PushProx](https://github.com/rancher/PushProx) that sets up a Deployment of a PushProx proxy and a DaemonSet of PushProx clients on a Kubernetes cluster. + +Installs [rancher-pushprox](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-pushprox) to create PushProx clients that can access their host's network and register with a PushProx proxy. A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR is also included that is configured to scrape the metrics from each of the clients through the proxy. + +Using an instance of this chart is suitable for the following scenarios: +- You need to scrape metrics from a port that should not be accessible outside of the host (e.g. scraping `etcd` metrics in a hardened cluster) +- You need to scrape metrics on a host that are not exposed outside of 127.0.0.1 (e.g. scraping `kube-proxy` metrics) +- You need to scrape metrics through HTTPS using certs hosted directly on `hostPath` +- You need to scrape metrics from Kubernetes components that require authorization via a service account (e.g. permissions to make request to `/metrics`) +- You need to scrape metrics without access to cacerts (i.e. enable `insecureSkipVerify`) + +The clients and proxy are created based on a Rancher fork of the [prometheus-community/PushProx](https://github.com/prometheus-community/PushProx) project. + +## Configuration + +The following tables list the configurable parameters of the rancher-pushprox chart and their default values. 
+ +### General + +#### Required +| Parameter | Description | Example | +| ----- | ----------- | ------ | +| `component` | The component that is being monitored | `kube-etcd` +| `metricsPort` | The port on the host that contains the metrics you want to scrape (e.g. `http://<HOST_IP>:<metricsPort>/metrics`) | `2379` | + +#### Optional +| Parameter | Description | Default | +| ----- | ----------- | ------ | +| `serviceMonitor.enabled` | Deploys a [Prometheus Operator](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#servicemonitor) ServiceMonitor CR that is configured to scrape metrics on the hosts that the clients are deployed on via the proxy. Also deploys a Service that points to all pods with the expected client name that exposes the `metricsPort` selected | `true` | +| `clients.enabled` | Deploys a DaemonSet of clients that are each capable of scraping endpoints on the hostNetwork it is deployed on | `true` | +| `clients.port` | The port where the client will publish PushProx client-specific metrics. If deploying multiple clients onto the same node, the clients should not have conflicting ports | `9369` | +| `clients.proxyUrl` | Overrides the default proxyUrl setting of `http://pushprox-{{ .Values.component }}-proxy.{{ .Release.Namespace }}.svc.cluster.local:{{ .Values.proxy.port }}` with the `proxyUrl` specified | `""` | +| `clients.useLocalhost` | Sets a flag on each client deployment to redirect scrapes directed to `HOST_IP` to `127.0.0.1` | `false` | +| `clients.https.enabled` | Enables scraping metrics via HTTPS using the provided TLS certs that exist on each host | `false` | +| `clients.https.useServiceAccountCredentials` | If set to true, the client will create a service account with permissions to scrape `/metrics` endpoint of Kubernetes components.
The client will use the service account token provided to make authorized scrape requests to the Kubernetes API | `false` | +| `clients.https.insecureSkipVerify` | If set to true, the client will disable SSL security checks | `false` | +| `clients.https.certDir` | A `hostPath` where TLS certs can be found. This path is mounted as a volume on an `initContainer` which copies only the necessary files over to an EmptyDir volume used by each client. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.certFile` | The path to the TLS cert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.keyFile` | The path to the TLS key file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.https.caCertFile` | The path to the TLS cacert file located within `clients.https.certDir`. Required and only used if `clients.https.enabled` is set | `""` | +| `clients.resources` | Set resource limits and requests for the client container | `{}` | +| `clients.nodeSelector` | Select which nodes to deploy the clients on | `{}` | +| `clients.tolerations` | Specify tolerations for clients | `[]` | +| `proxy.enabled` | Deploys the proxy that each client will register with | `true` | +| `proxy.port` | The port exposed by the proxy that each client will register with to allow metrics to be scraped from the host | `8080` | +| `proxy.resources` | Set resource limits and requests for the proxy container | `{}` | +| `proxy.nodeSelector` | Select which nodes the proxy can be deployed on | `{}` | +| `proxy.tolerations` | Specify tolerations (if necessary) to allow the proxy to be deployed on the selected node | `[]` | + +*Tip: The filepaths set in `clients.https.<cert|key|caCert>File` can include wildcard characters*.
+ +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for examples of how this chart can be used. \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/_helpers.tpl new file mode 100755 index 000000000..f77b8edf4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/_helpers.tpl @@ -0,0 +1,87 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# General + +{{- define "pushprox.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{- define "pushProxy.commonLabels" -}} +release: {{ .Release.Name }} +component: {{ .Values.component | quote }} +provider: kubernetes +{{- end -}} + +{{- define "pushProxy.proxyUrl" -}} +{{- $_ := (required "Template requires either .Values.proxy.port or .Values.client.proxyUrl to set proxyUrl for client" (or .Values.clients.proxyUrl .Values.proxy.port)) -}} +{{- if .Values.clients.proxyUrl -}} +{{ printf "%s" .Values.clients.proxyUrl }} +{{- else -}} +{{ printf "http://%s.%s.svc:%d" (include 
"pushProxy.proxy.name" .) .Release.Namespace (int .Values.proxy.port) }} +{{- end -}}{{- end -}} + +# Client + +{{- define "pushProxy.client.name" -}} +{{- printf "pushprox-%s-client" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.client.labels" -}} +k8s-app: {{ template "pushProxy.client.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# Proxy + +{{- define "pushProxy.proxy.name" -}} +{{- printf "pushprox-%s-proxy" (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.proxy.labels" -}} +k8s-app: {{ template "pushProxy.proxy.name" . }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} + +# ServiceMonitor + +{{- define "pushprox.serviceMonitor.name" -}} +{{- printf "%s-%s" .Release.Name (required ".Values.component is required" .Values.component) -}} +{{- end -}} + +{{- define "pushProxy.serviceMonitor.labels" -}} +app: {{ template "pushprox.serviceMonitor.name" . }} +release: {{ .Release.Name | quote }} +{{ template "pushProxy.commonLabels" . }} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-clients-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-clients-rbac.yaml new file mode 100755 index 000000000..95346dee6 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-clients-rbac.yaml @@ -0,0 +1,74 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.client.name" . 
}} +{{- if and .Values.clients.https.enabled .Values.clients.https.useServiceAccountCredentials }} +- nonResourceURLs: ["/metrics"] + verbs: ["get"] +{{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.client.name" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.client.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.client.labels" . 
| nindent 4 }} +spec: + privileged: false + hostNetwork: true + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + - 'emptyDir' + - 'hostPath' + allowedHostPaths: + - pathPrefix: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + readOnly: true +{{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-clients.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-clients.yaml new file mode 100755 index 000000000..ed78792e5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-clients.yaml @@ -0,0 +1,135 @@ +{{- if .Values.clients }}{{- if .Values.clients.enabled }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} + pushprox-exporter: "client" +spec: + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.client.labels" . | nindent 8 }} + spec: + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} +{{- if .Values.clients.tolerations }} +{{ toYaml .Values.clients.tolerations | indent 8 }} +{{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: {{ template "pushProxy.client.name" . }} + containers: + - name: pushprox-client + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: + {{- range .Values.clients.command }} + - {{ . | quote }} + {{- end }} + args: + - --fqdn=$(HOST_IP) + - --proxy-url=$(PROXY_URL) + - --metrics-addr=$(PORT) + - --allow-port={{ required "Need .Values.metricsPort to configure client to be allowed to scrape metrics at port" .Values.metricsPort}} + {{- if .Values.clients.useLocalhost }} + - --use-localhost + {{- end }} + {{- if .Values.clients.https.enabled }} + {{- if .Values.clients.https.insecureSkipVerify }} + - --insecure-skip-verify + {{- end }} + {{- if .Values.clients.https.useServiceAccountCredentials }} + - --token-path=/var/run/secrets/kubernetes.io/serviceaccount/token + {{- end }} + {{- if .Values.clients.https.certDir }} + - --tls.cert=/etc/ssl/push-proxy/push-proxy.pem + - --tls.key=/etc/ssl/push-proxy/push-proxy-key.pem + - --tls.cacert=/etc/ssl/push-proxy/push-proxy-ca-cert.pem + {{- end }} + {{- end }} + env: + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + - name: PORT + value: :{{ .Values.clients.port }} + - name: PROXY_URL + value: {{ template "pushProxy.proxyUrl" . }} + securityContext: + runAsNonRoot: true + runAsUser: 1000 + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + volumeMounts: + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + {{- end }} + {{- if .Values.clients.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} + {{- end }} + {{- if and .Values.clients.https.enabled .Values.clients.https.certDir }} + initContainers: + - name: copy-certs + image: {{ template "system_default_registry" . 
}}{{ .Values.clients.copyCertsImage.repository }}:{{ .Values.clients.copyCertsImage.tag }} + command: + - sh + - -c + - | + echo "Searching for files to copy within the source volume" + echo "cert: ${CERT_FILE_NAME}" + echo "key: ${KEY_FILE_NAME}" + echo "cacert: ${CACERT_FILE_NAME}" + + CERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CERT_FILE_NAME}" | sort -r | head -n 1) + KEY_FILE_SOURCE=$(find /etc/source/ -type f -name "${KEY_FILE_NAME}" | sort -r | head -n 1) + CACERT_FILE_SOURCE=$(find /etc/source/ -type f -name "${CACERT_FILE_NAME}" | sort -r | head -n 1) + + test -z ${CERT_FILE_SOURCE} && echo "Failed to find cert file" && exit 1 + test -z ${KEY_FILE_SOURCE} && echo "Failed to find key file" && exit 1 + test -z ${CACERT_FILE_SOURCE} && echo "Failed to find cacert file" && exit 1 + + echo "Copying cert file from $CERT_FILE_SOURCE to $CERT_FILE_TARGET" + cp $CERT_FILE_SOURCE $CERT_FILE_TARGET || exit 1 + chmod 444 $CERT_FILE_TARGET || exit 1 + + echo "Copying key file from $KEY_FILE_SOURCE to $KEY_FILE_TARGET" + cp $KEY_FILE_SOURCE $KEY_FILE_TARGET || exit 1 + chmod 444 $KEY_FILE_TARGET || exit 1 + + echo "Copying cacert file from $CACERT_FILE_SOURCE to $CACERT_FILE_TARGET" + cp $CACERT_FILE_SOURCE $CACERT_FILE_TARGET || exit 1 + chmod 444 $CACERT_FILE_TARGET || exit 1 + env: + - name: CERT_FILE_NAME + value: {{ required "Need a TLS cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.certFile }} + - name: KEY_FILE_NAME + value: {{ required "Need a TLS key file for scraping metrics endpoint over HTTPs" .Values.clients.https.keyFile }} + - name: CACERT_FILE_NAME + value: {{ required "Need a TLS CA cert file for scraping metrics endpoint over HTTPs" .Values.clients.https.caCertFile }} + - name: CERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy.pem + - name: KEY_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-key.pem + - name: CACERT_FILE_TARGET + value: /etc/ssl/push-proxy/push-proxy-ca-cert.pem + securityContext: + 
runAsNonRoot: false + volumeMounts: + - name: metrics-cert-dir-source + mountPath: /etc/source + readOnly: true + - name: metrics-cert-dir + mountPath: /etc/ssl/push-proxy + volumes: + - name: metrics-cert-dir-source + hostPath: + path: {{ required "Need access to volume on host with the SSL cert files to use HTTPs" .Values.clients.https.certDir }} + - name: metrics-cert-dir + emptyDir: {} + {{- end }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-proxy-rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-proxy-rbac.yaml new file mode 100755 index 000000000..a3509c160 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-proxy-rbac.yaml @@ -0,0 +1,63 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +rules: +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "pushProxy.proxy.name" . }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "pushProxy.proxy.name" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "pushProxy.proxy.name" . }} +subjects: + - kind: ServiceAccount + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . 
| nindent 4 }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ .Release.Namespace }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-proxy.yaml new file mode 100755 index 000000000..571e13138 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-proxy.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.proxy }}{{ if .Values.proxy.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} + pushprox-exporter: "proxy" +spec: + selector: + matchLabels: {{ include "pushProxy.proxy.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "pushProxy.proxy.labels" . | nindent 8 }} + spec: + securityContext: + runAsNonRoot: true + runAsUser: 1000 + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- if .Values.proxy.nodeSelector }} +{{ toYaml .Values.proxy.nodeSelector | indent 8 }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- if .Values.proxy.tolerations }} +{{ toYaml .Values.proxy.tolerations | indent 8 }} +{{- end }} + serviceAccountName: {{ template "pushProxy.proxy.name" . 
}} + containers: + - name: pushprox-proxy + image: {{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }} + command: + {{- range .Values.proxy.command }} + - {{ . | quote }} + {{- end }} + {{- if .Values.proxy.resources }} + resources: {{ toYaml .Values.proxy.resources | nindent 10 }} + {{- end }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.proxy.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +spec: + ports: + - name: pp-proxy + port: {{ required "Need .Values.proxy.port to configure proxy" .Values.proxy.port }} + protocol: TCP + targetPort: {{ .Values.proxy.port }} + selector: {{ include "pushProxy.proxy.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-servicemonitor.yaml new file mode 100755 index 000000000..2f3d7e54c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/templates/pushprox-servicemonitor.yaml @@ -0,0 +1,39 @@ +{{- if .Values.serviceMonitor }}{{- if .Values.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "pushprox.serviceMonitor.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.serviceMonitor.labels" . | nindent 4 }} +spec: + endpoints: + - port: metrics + proxyUrl: {{ template "pushProxy.proxyUrl" . }} + {{- if .Values.clients.https.enabled }} + params: + _scheme: [https] + {{- end }} + jobLabel: component + podTargetLabels: + - component + - pushprox-exporter + namespaceSelector: + matchNames: + - {{ template "pushprox.namespace" . 
}} + selector: + matchLabels: {{ include "pushProxy.client.labels" . | nindent 6 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "pushProxy.client.name" . }} + namespace: {{ template "pushprox.namespace" . }} + labels: {{ include "pushProxy.client.labels" . | nindent 4 }} +spec: + ports: + - name: metrics + port: {{ required "Need .Values.metricsPort to configure client to listen to metrics at port" .Values.metricsPort}} + protocol: TCP + targetPort: {{ .Values.metricsPort }} + selector: {{ include "pushProxy.client.labels" . | nindent 4 }} +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/values.yaml new file mode 100755 index 000000000..e1bcf79a5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/rkeScheduler/values.yaml @@ -0,0 +1,86 @@ +# Default values for rancher-pushprox. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +# Default image containing both the proxy and the client was generated from the following Dockerfile +# https://github.com/prometheus-community/PushProx/blob/eeadbe766641699129920ccfaaaa30a85c67fe81/Dockerfile#L1-L15 + +# Note: if using a cloud provider, the nodes that contain the PushProxy client must allow the node(s) that contain(s) +# the PushProxy proxy to communicate with it on port {{ .Values.clients.port }}. If you have special restrictions, +# (i.e. client should only run on etcd nodes and only control plane should have access to the port on the etcd node), +# you will need to set the clients / proxy nodeSelector and tolerations accordingly + +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + +# The component that is being monitored (i.e. 
etcd) +component: "component" + +# The port containing the metrics that need to be scraped +metricsPort: 2739 + +# Configure ServiceMonitor that monitors metrics from the metricsPort endpoint +serviceMonitor: + enabled: true + +clients: + enabled: true + # The port which the PushProx client will post PushProx metrics to + port: 9369 + # If unset, this will default to the URL for the proxy service: http://pushprox-{{component}}-proxy.{{namepsace}}.svc.cluster.local:{{proxy.port}} + # Should be modified if the clients are being deployed outside the cluster where the proxy rests, otherwise leave it null + proxyUrl: "" + # If set to true, the client will forward any requests from the host IP to 127.0.0.1 + # It will only allow proxy requests to the metricsPort specified + useLocalhost: false + # Configuration for accessing metrics via HTTPS + https: + # Does the client require https to access the metrics? + enabled: false + # If set to true, the client will create a service account with adequate permissions and set a flag + # on the client to use the service account token provided by it to make authorized scrape requests + useServiceAccountCredentials: false + # If set to true, the client will disable SSL security checks + insecureSkipVerify: false + # Directory on host where necessary TLS cert and key to scrape metrics can be found + certDir: "" + # Filenames for files located in .Values.clients.https.certDir that correspond to TLS settings + certFile: "" + keyFile: "" + caCertFile: "" + + # Resource limits + resources: {} + + # Options to select all nodes to deploy client DaemonSet on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-client + tag: v0.1.0-rancher1-client + command: ["pushprox-client"] + + copyCertsImage: + repository: rancher/mirrored-library-busybox + tag: 1.31.1 + +proxy: + enabled: true + # The port through which PushProx clients will communicate to the proxy + port: 8080 + + # Resource limits + resources: {} + + # 
Options to select a node to run a single proxy deployment on + nodeSelector: {} + tolerations: [] + + image: + repository: rancher/pushprox-proxy + tag: v0.1.0-rancher1-proxy + command: ["pushprox-proxy"] \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/.helmignore b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/.helmignore new file mode 100755 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/Chart.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/Chart.yaml new file mode 100755 index 000000000..fba9162f2 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/Chart.yaml @@ -0,0 +1,15 @@ +annotations: + catalog.cattle.io/hidden: "true" + catalog.cattle.io/os: windows + catalog.rancher.io/certified: rancher + catalog.rancher.io/namespace: cattle-monitoring-system + catalog.rancher.io/release-name: rancher-windows-exporter +apiVersion: v1 +appVersion: 0.0.4 +description: Sets up monitoring metrics from Windows nodes via Prometheus windows-exporter +maintainers: +- email: arvind.iyengar@rancher.com + name: aiyengar2 +name: windowsExporter +type: application +version: 0.1.0 diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/README.md 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/README.md new file mode 100755 index 000000000..6115b6f25 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/README.md @@ -0,0 +1,17 @@ +# rancher-windows-exporter + +A Rancher chart based on the [prometheus-community/windows-exporter](https://github.com/prometheus-community/windows_exporter) project (previously called wmi-exporter) that sets up a DaemonSet of clients that can scrape windows-exporter metrics from Windows nodes on a Kubernetes cluster. + +A [Prometheus Operator](https://github.com/coreos/prometheus-operator) ServiceMonitor CR and PrometheusRule CR are also created by this chart to collect metrics and add some recording rules to map `windows_` series with their OS-agnostic counterparts. + +## Node Requirements + +Since Windows does not support privileged pods, this chart expects a Named Pipe (`\\.\pipe\rancher_wins`) to exist on the Windows host that allows containers to communicate with the host. This is done by deploying a [rancher/wins](https://github.com/rancher/wins) server on the host. + +The image used by the chart, [windows_exporter-package](https://github.com/rancher/windows_exporter-package), is configured to create a wins client that communicates with the wins server, alongside a running copy of a particular version of [windows-exporter](https://github.com/prometheus-community/windows_exporter). Through the wins client and wins server, the windows-exporter is able to communicate directly with the Windows host to collect metrics and expose them. + +If the cluster you are installing this chart on is a custom cluster that was created via RKE1 with Windows Support enabled, your nodes should already have the wins server running; this should have been added as part of [the bootstrapping process for adding the Windows node onto your RKE1 cluster](https://github.com/rancher/rancher/blob/master/package/windows/bootstrap.ps1). 
+ +## Configuration + +See [rancher-monitoring](https://github.com/rancher/charts/tree/gh-pages/packages/rancher-monitoring) for an example of how this chart can be used. diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/scripts/check-wins-version.ps1 b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/scripts/check-wins-version.ps1 new file mode 100755 index 000000000..f8452bbef --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/scripts/check-wins-version.ps1 @@ -0,0 +1,20 @@ +$ErrorActionPreference = 'Stop' + +$winsPath = "c:\Windows\wins.exe" +$minWinsVersion = [System.Version]"0.1.0" + +function Get-Wins-Version +{ + $winsAppInfo = Invoke-Expression "& $winsPath cli app info | ConvertFrom-Json" + return [System.Version]($winsAppInfo.Server.Version.substring(1)) +} + +# Wait till the wins version installed is at least v0.1.0 +$winsVersion = Get-Wins-Version +while ($winsVersion -lt $minWinsVersion) { + Write-Host $('wins on host must be at least v{0}, found v{1}. Checking again in 10 seconds...' -f $minWinsVersion, $winsVersion) + Start-Sleep -s 10 + $winsVersion = Get-Wins-Version +} + +Write-Host $('Detected wins version on host is v{0}, which is >v{1}. Continuing with installation...' 
-f $winsVersion, $minWinsVersion) diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/scripts/proxy-entry.ps1 b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/scripts/proxy-entry.ps1 new file mode 100755 index 000000000..9d0581b66 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/scripts/proxy-entry.ps1 @@ -0,0 +1,11 @@ +# default +$listenPort = "9796" + +if ($env:LISTEN_PORT) { + $listenPort = $env:LISTEN_PORT +} + +# format "UDP:4789 TCP:8080" +$winsPublish = $('TCP:{0}' -f $listenPort) + +wins.exe cli proxy --publish $winsPublish diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/scripts/run.ps1 b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/scripts/run.ps1 new file mode 100755 index 000000000..c2e980a3f --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/scripts/run.ps1 @@ -0,0 +1,78 @@ +$ErrorActionPreference = 'Stop' + +function Create-Directory +{ + param ( + [parameter(Mandatory = $false, ValueFromPipeline = $true)] [string]$Path + ) + + if (Test-Path -Path $Path) { + if (-not (Test-Path -Path $Path -PathType Container)) { + # clean the same path file + Remove-Item -Recurse -Force -Path $Path -ErrorAction Ignore | Out-Null + } + + return + } + + New-Item -Force -ItemType Directory -Path $Path | Out-Null +} + +function Transfer-File +{ + param ( + [parameter(Mandatory = $true)] [string]$Src, + [parameter(Mandatory = $true)] [string]$Dst + ) + + if (Test-Path -PathType leaf -Path $Dst) { + $dstHasher = Get-FileHash -Path $Dst + $srcHasher = Get-FileHash -Path $Src + if ($dstHasher.Hash -eq $srcHasher.Hash) { + return + } + } + + $null = Copy-Item -Force -Path $Src -Destination $Dst +} + +# Copy binary into host +Create-Directory -Path "c:\host\etc\windows-exporter" +Transfer-File -Src "c:\etc\windows-exporter\windows-exporter.exe" 
-Dst "c:\host\etc\windows-exporter\windows-exporter.exe" + +# Copy binary into prefix path, since wins expects the same path on the host and on the container +$prefixPath = 'c:\' +if ($env:CATTLE_PREFIX_PATH) { + $prefixPath = $env:CATTLE_PREFIX_PATH +} +$winsDirPath = $('{0}etc\windows-exporter' -f $prefixPath) +$winsPath = $('{0}\windows-exporter.exe' -f $winsDirPath) + +Create-Directory -Path $winsDirPath +Transfer-File -Src "c:\etc\windows-exporter\windows-exporter.exe" $winsPath + +# Run wins with defaults +$listenPort = "9796" +$enabledCollectors = "net,os,service,system,cpu,cs,logical_disk" +$maxRequests = "5" + +if ($env:LISTEN_PORT) { + $listenPort = $env:LISTEN_PORT +} + +if ($env:ENABLED_COLLECTORS) { + $enabledCollectors = $env:ENABLED_COLLECTORS +} + +if ($env:MAX_REQUESTS) { + $maxRequests = $env:MAX_REQUESTS +} + +# format "UDP:4789 TCP:8080" +$winsExposes = $('TCP:{0}' -f $listenPort) + +# format "--a=b --c=d" +$winsArgs = $('--collectors.enabled={0} --telemetry.addr=:{1} --telemetry.max-requests={2} --telemetry.path=/metrics' -f $enabledCollectors, $listenPort, $maxRequests) + + +wins.exe cli prc run --path $winsPath --exposes $winsExposes --args "$winsArgs" diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/_helpers.tpl new file mode 100755 index 000000000..4fc68cf97 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/_helpers.tpl @@ -0,0 +1,73 @@ +# Rancher + +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +# General + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+If release name contains chart name it will be used as a full name. +The components in this chart create additional resources that expand the longest created name strings. +The longest name that gets created adds and extra 37 characters, so truncation should be 63-35=26. +*/}} +{{- define "windowsExporter.name" -}} +{{ printf "%s-windows-exporter" .Release.Name }} +{{- end -}} + +{{- define "windowsExporter.namespace" -}} +{{- default .Release.Namespace .Values.namespaceOverride -}} +{{- end -}} + +{{- define "windowsExporter.labels" -}} +k8s-app: {{ template "windowsExporter.name" . }} +release: {{ .Release.Name }} +component: "windows-exporter" +provider: kubernetes +{{- end -}} + +# Client + +{{- define "windowsExporter.client.nodeSelector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: windows +{{- else -}} +kubernetes.io/os: windows +{{- end -}} +{{- if .Values.clients.nodeSelector }} +{{ toYaml .Values.clients.nodeSelector }} +{{- end }} +{{- end -}} + +{{- define "windowsExporter.client.tolerations" -}} +{{- if .Values.clients.tolerations -}} +{{ toYaml .Values.clients.tolerations }} +{{- else -}} +- operator: Exists +{{- end -}} +{{- end -}} + +{{- define "windowsExporter.client.env" -}} +- name: LISTEN_PORT + value: {{ required "Need .Values.clients.port to figure out where to get metrics from" .Values.clients.port | quote }} +{{- if .Values.clients.enabledCollectors }} +- name: ENABLED_COLLECTORS + value: {{ .Values.clients.enabledCollectors | quote }} +{{- end }} +{{- if .Values.clients.env }} +{{ toYaml .Values.clients.env }} +{{- end }} +{{- end -}} + +{{- define "windowsExporter.validatePathPrefix" -}} +{{- if .Values.global.cattle.rkeWindowsPathPrefix -}} +{{- $prefixPath := (.Values.global.cattle.rkeWindowsPathPrefix | replace "/" "\\") -}} +{{- if (not (hasSuffix "\\" $prefixPath)) -}} +{{- fail (printf ".Values.global.cattle.rkeWindowsPathPrefix must end in '/' or '\\', found %s" $prefixPath) 
-}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/configmap.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/configmap.yaml new file mode 100755 index 000000000..e7647a407 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/configmap.yaml @@ -0,0 +1,10 @@ +{{- if .Values.clients }}{{ if .Values.clients.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "windowsExporter.name" . }}-scripts + namespace: {{ template "windowsExporter.namespace" . }} + labels: {{ include "windowsExporter.labels" . | nindent 4 }} +data: +{{ (.Files.Glob "scripts/*").AsConfig | indent 2 }} +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/daemonset.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/daemonset.yaml new file mode 100755 index 000000000..a64d19a3e --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/daemonset.yaml @@ -0,0 +1,77 @@ +{{- if .Values.clients }}{{ if .Values.clients.enabled }} +{{ include "windowsExporter.validatePathPrefix" . }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: {{ template "windowsExporter.name" . }} + namespace: {{ template "windowsExporter.namespace" . }} + labels: {{ include "windowsExporter.labels" . | nindent 4 }} +spec: + selector: + matchLabels: {{ include "windowsExporter.labels" . | nindent 6 }} + template: + metadata: + labels: {{ include "windowsExporter.labels" . | nindent 8 }} + spec: + nodeSelector: {{ include "windowsExporter.client.nodeSelector" . | nindent 8 }} + tolerations: {{ include "windowsExporter.client.tolerations" . | nindent 8 }} + serviceAccountName: {{ template "windowsExporter.name" . 
}} + containers: + - name: exporter-node-proxy + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: ["pwsh", "-f", "c:/scripts/proxy-entry.ps1"] + ports: + - name: http + containerPort: {{ required "Need .Values.clients.port to figure out where to get metrics from" .Values.clients.port }} + env: {{ include "windowsExporter.client.env" . | nindent 10 }} +{{- if .Values.resources }} + resources: {{ toYaml .Values.clients.proxy.resources | nindent 10 }} +{{- end }} + volumeMounts: + - name: wins-pipe-proxy + mountPath: \\.\pipe\rancher_wins_proxy + - name: exporter-scripts + mountPath: c:/scripts/ + - name: exporter-node + image: {{ template "system_default_registry" . }}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: ["pwsh", "-f", "c:/scripts/run.ps1"] +{{- if .Values.clients.args }} + args: {{ .Values.clients.args }} +{{- end }} + env: {{ include "windowsExporter.client.env" . | nindent 8 }} + - name: CATTLE_PREFIX_PATH + value: {{ default "c:\\" .Values.global.cattle.rkeWindowsPathPrefix | replace "/" "\\" }} +{{- if .Values.resources }} + resources: {{ toYaml .Values.clients.resources | nindent 10 }} +{{- end }} + volumeMounts: + - name: wins-pipe + mountPath: \\.\pipe\rancher_wins + - name: binary-host-path + mountPath: c:/host/etc/windows-exporter + - name: exporter-scripts + mountPath: c:/scripts/ + initContainers: + - name: check-wins-version + image: {{ template "system_default_registry" . 
}}{{ .Values.clients.image.repository }}:{{ .Values.clients.image.tag }} + command: ["pwsh", "-f", "c:/scripts/check-wins-version.ps1"] + volumeMounts: + - name: wins-pipe + mountPath: \\.\pipe\rancher_wins + - name: exporter-scripts + mountPath: c:/scripts/ + volumes: + - name: wins-pipe + hostPath: + path: \\.\pipe\rancher_wins + - name: wins-pipe-proxy + hostPath: + path: \\.\pipe\rancher_wins_proxy + - name: binary-host-path + hostPath: + path: {{ default "c:\\" .Values.global.cattle.rkeWindowsPathPrefix | replace "\\" "/" }}etc/windows-exporter + type: DirectoryOrCreate + - name: exporter-scripts + configMap: + name: {{ template "windowsExporter.name" . }}-scripts +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/rbac.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/rbac.yaml new file mode 100755 index 000000000..ebec8f235 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/rbac.yaml @@ -0,0 +1,78 @@ +{{- if .Values.clients }}{{ if .Values.clients.enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "windowsExporter.name" . }} + namespace: {{ template "windowsExporter.namespace" . }} + labels: {{ include "windowsExporter.labels" . | nindent 4 }} +rules: +- apiGroups: ['authentication.k8s.io'] + resources: ['tokenreviews'] + verbs: ['create'] +- apiGroups: ['authorization.k8s.io'] + resources: ['subjectaccessreviews'] + verbs: ['create'] +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: ['{{ template "windowsExporter.name" . }}'] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "windowsExporter.name" . }} + namespace: {{ template "windowsExporter.namespace" . }} + labels: {{ include "windowsExporter.labels" . 
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "windowsExporter.name" . }} +subjects: +- kind: ServiceAccount + name: {{ template "windowsExporter.name" . }} + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "windowsExporter.name" . }} + namespace: {{ template "windowsExporter.namespace" . }} + labels: {{ include "windowsExporter.labels" . | nindent 4 }} +{{- if .Values.clients.imagePullSecrets }} +imagePullSecrets: {{ toYaml .Values.clients.imagePullSecrets | nindent 2 }} +{{- end }} +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "windowsExporter.name" . }} + namespace: {{ template "windowsExporter.namespace" . }} + labels: {{ include "windowsExporter.labels" . | nindent 4 }} +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 0 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' + - 'hostPath' + allowedHostPaths: + - pathPrefix: \\.\pipe\rancher_wins + - pathPrefix: \\.\pipe\rancher_wins_proxy + - pathPrefix: c:/etc/windows-exporter +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/service.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/service.yaml new file mode 100755 index 000000000..03b87faae --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/service.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.clients }}{{- if and .Values.clients.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "windowsExporter.name" . }} + namespace: {{ template "windowsExporter.namespace" . 
}} + labels: {{ include "windowsExporter.labels" . | nindent 4 }} +spec: + ports: + - name: windows-metrics + port: {{ required "Need .Values.clients.port to figure out where to get metrics from" .Values.clients.port }} + protocol: TCP + targetPort: {{ .Values.clients.port }} + selector: {{ include "windowsExporter.labels" . | nindent 4 }} +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/servicemonitor.yaml new file mode 100755 index 000000000..a2c2f0b54 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/templates/servicemonitor.yaml @@ -0,0 +1,44 @@ +{{- if and .Values.serviceMonitor .Values.clients }}{{- if and .Values.serviceMonitor.enabled .Values.clients.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: {{ include "windowsExporter.labels" . | nindent 4 }} + name: {{ template "windowsExporter.name" . }} + namespace: {{ template "windowsExporter.namespace" . }} +spec: + selector: + matchLabels: {{ include "windowsExporter.labels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ template "windowsExporter.namespace" . 
}} + jobLabel: component + podTargetLabels: + - component + endpoints: + - port: windows-metrics + metricRelabelings: + - sourceLabels: [__name__] + regex: 'wmi_(.*)' + replacement: 'windows_$1' + targetLabel: __name__ + - sourceLabels: [__name__] + regex: windows_mssql_transactions_active_total + replacement: 'windows_mssql_transactions_active' + targetLabel: __name__ + - sourceLabels: [volume, nic] + regex: (.*);(.*) + separator: '' + targetLabel: device + action: replace + replacement: $1$2 + - sourceLabels: [__name__] + regex: windows_cs_logical_processors + replacement: 'system' + targetLabel: mode + relabelings: + - separator: ':' + sourceLabels: + - __meta_kubernetes_pod_host_ip + - __meta_kubernetes_pod_container_port_number + targetLabel: instance +{{- end }}{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/values.yaml new file mode 100755 index 000000000..6130890bd --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/charts/windowsExporter/values.yaml @@ -0,0 +1,46 @@ +# Default values for rancher-windows-exporter. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# Configuration + +global: + cattle: + systemDefaultRegistry: "" + rkeWindowsPathPrefix: "c:\\" + +# Configure ServiceMonitor that monitors metrics +serviceMonitor: + enabled: true + +## Components scraping metrics from Windows nodes +## +clients: + enabled: true + + port: 9796 + image: + repository: rancher/windows_exporter-package + tag: v0.0.1 + os: "windows" + + # Specify the IP addresses of nodes that you want to collect metrics from + endpoints: [] + + # Get more details on https://github.com/prometheus-community/windows_exporter + args: [] + env: {} + enabledCollectors: "net,os,service,system,cpu,cs,logical_disk,tcp,memory,container" + + # Resource limits + resources: {} + + # Options to select nodes to target for scraping Windows metrics + nodeSelector: {} # Note: {kubernetes.io/os: windows} is default and cannot be overridden + tolerations: [] # Note: if not specified, the default option is to use [{operator: Exists}] + + # Image Pull Secrets for the service account used by the clients + imagePullSecrets: {} + + proxy: + resources: {} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/ingress-nginx/nginx.json b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/ingress-nginx/nginx.json new file mode 100755 index 000000000..347c9eb05 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/ingress-nginx/nginx.json @@ -0,0 +1,1463 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "5.2.1" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "5.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "5.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + 
"enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + }, + { + "datasource": "${DS_PROMETHEUS}", + "enable": true, + "expr": "sum(changes(nginx_ingress_controller_config_last_reload_successful_timestamp_seconds{instance!=\"unknown\",controller_class=~\"$controller_class\",namespace=~\"$namespace\"}[30s])) by (controller_class)", + "hide": false, + "iconColor": "rgba(255, 96, 96, 1)", + "limit": 100, + "name": "Config Reloads", + "showIn": 0, + "step": "30s", + "tagKeys": "controller_class", + "tags": [], + "titleFormat": "Config Reloaded", + "type": "tags" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "iteration": 1534359654832, + "links": [], + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_PROMETHEUS}", + "format": "ops", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 0, + "y": 0 + }, + "id": 20, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "round(sum(irate(nginx_ingress_controller_requests{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",namespace=~\"$namespace\"}[2m])), 0.001)", + "format": 
"time_series", + "intervalFactor": 1, + "refId": "A", + "step": 4 + } + ], + "thresholds": "", + "title": "Controller Request Volume", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_PROMETHEUS}", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 6, + "y": 0 + }, + "id": 82, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(avg_over_time(nginx_ingress_controller_nginx_process_connections{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",state=\"active\"}[2m]))", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "refId": "A", + "step": 4 + } + ], + "thresholds": "", + "title": "Controller Connections", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + 
"rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_PROMETHEUS}", + "format": "percentunit", + "gauge": { + "maxValue": 100, + "minValue": 80, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": false + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 12, + "y": 0 + }, + "id": 21, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(rate(nginx_ingress_controller_requests{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",namespace=~\"$namespace\",status!~\"[4-5].*\"}[2m])) / sum(rate(nginx_ingress_controller_requests{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",namespace=~\"$namespace\"}[2m]))", + "format": "time_series", + "intervalFactor": 1, + "refId": "A", + "step": 4 + } + ], + "thresholds": "95, 99, 99.5", + "title": "Controller Success Rate (non-4|5xx responses)", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_PROMETHEUS}", + "decimals": 0, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + 
"thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 18, + "y": 0 + }, + "id": 81, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "avg(irate(nginx_ingress_controller_success{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\"}[1m])) * 60", + "format": "time_series", + "instant": false, + "intervalFactor": 1, + "refId": "A", + "step": 4 + } + ], + "thresholds": "", + "title": "Config Reloads", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "total" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_PROMETHEUS}", + "decimals": 0, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 21, + "y": 0 + }, + "id": 83, + "interval": null, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": 
"50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": true, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "count(nginx_ingress_controller_config_last_reload_successful{controller_pod=~\"$controller\",controller_namespace=~\"$namespace\"} == 0)", + "format": "time_series", + "instant": true, + "intervalFactor": 1, + "refId": "A", + "step": 4 + } + ], + "thresholds": "", + "title": "Last Config Failed", + "transparent": false, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 3 + }, + "height": "200px", + "id": 86, + "isNew": true, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "hideEmpty": false, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 300, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "repeatDirection": "h", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "round(sum(irate(nginx_ingress_controller_requests{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (ingress), 0.001)", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + 
"legendFormat": "{{ ingress }}", + "metric": "network", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Ingress Request Volume", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "transparent": false, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "reqps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "max - istio-proxy": "#890f02", + "max - master": "#bf1b00", + "max - prometheus": "#bf1b00" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "decimals": 2, + "editable": false, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 3 + }, + "id": 87, + "isNew": true, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "hideEmpty": true, + "hideZero": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": 300, + "sort": "avg", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(nginx_ingress_controller_requests{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",namespace=~\"$namespace\",ingress=~\"$ingress\",status!~\"[4-5].*\"}[2m])) by (ingress) / 
sum(rate(nginx_ingress_controller_requests{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (ingress)", + "format": "time_series", + "instant": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "{{ ingress }}", + "metric": "container_memory_usage:sort_desc", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Ingress Success Rate (non-4|5xx responses)", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 1, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "grid": {}, + "gridPos": { + "h": 6, + "w": 8, + "x": 0, + "y": 10 + }, + "height": "200px", + "id": 32, + "isNew": true, + "legend": { + "alignAsTable": false, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (irate 
(nginx_ingress_controller_request_size_sum{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\"}[2m]))", + "format": "time_series", + "instant": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "Received", + "metric": "network", + "refId": "A", + "step": 10 + }, + { + "expr": "- sum (irate (nginx_ingress_controller_response_size_sum{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\"}[2m]))", + "format": "time_series", + "hide": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "Sent", + "metric": "network", + "refId": "B", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Network I/O pressure", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "transparent": false, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "max - istio-proxy": "#890f02", + "max - master": "#bf1b00", + "max - prometheus": "#bf1b00" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "decimals": 2, + "editable": false, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 6, + "w": 8, + "x": 8, + "y": 10 + }, + "id": 77, + "isNew": true, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "sideWidth": 200, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": true, + 
"linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "avg(nginx_ingress_controller_nginx_process_resident_memory_bytes{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\"}) ", + "format": "time_series", + "instant": false, + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "nginx", + "metric": "container_memory_usage:sort_desc", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Average Memory Usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "max - istio-proxy": "#890f02", + "max - master": "#bf1b00" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "decimals": 3, + "editable": false, + "error": false, + "fill": 0, + "grid": {}, + "gridPos": { + "h": 6, + "w": 8, + "x": 16, + "y": 10 + }, + "height": "", + "id": 79, + "isNew": true, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "sort": null, + "sortDesc": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, 
+ "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "avg (rate (nginx_ingress_controller_nginx_process_cpu_seconds_total{controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\"}[2m])) ", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "nginx", + "metric": "container_cpu", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Average CPU Usage", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "transparent": false, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": "cores", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "columns": [], + "datasource": "${DS_PROMETHEUS}", + "fontSize": "100%", + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 16 + }, + "hideTimeOverride": false, + "id": 75, + "links": [], + "pageSize": 7, + "repeat": null, + "repeatDirection": "h", + "scroll": true, + "showHeader": true, + "sort": { + "col": 1, + "desc": true + }, + "styles": [ + { + "alias": "Ingress", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "ingress", + "preserveFormat": false, + "sanitize": false, + "thresholds": [], + "type": "string", + "unit": "short" + }, + { + "alias": "Requests", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 
0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "Value #A", + "thresholds": [ + "" + ], + "type": "number", + "unit": "ops" + }, + { + "alias": "Errors", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "Value #B", + "thresholds": [], + "type": "number", + "unit": "ops" + }, + { + "alias": "P50 Latency", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "link": false, + "pattern": "Value #C", + "thresholds": [], + "type": "number", + "unit": "dtdurations" + }, + { + "alias": "P90 Latency", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "pattern": "Value #D", + "thresholds": [], + "type": "number", + "unit": "dtdurations" + }, + { + "alias": "P99 Latency", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "pattern": "Value #E", + "thresholds": [], + "type": "number", + "unit": "dtdurations" + }, + { + "alias": "IN", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "Value #F", + "thresholds": [ + "" + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "Time", + "thresholds": [], + "type": 
"hidden", + "unit": "short" + }, + { + "alias": "OUT", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "mappingType": 1, + "pattern": "Value #G", + "thresholds": [], + "type": "number", + "unit": "Bps" + } + ], + "targets": [ + { + "expr": "histogram_quantile(0.50, sum(rate(nginx_ingress_controller_request_duration_seconds_bucket{ingress!=\"\",controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (le, ingress))", + "format": "table", + "hide": false, + "instant": true, + "intervalFactor": 1, + "legendFormat": "{{ ingress }}", + "refId": "C" + }, + { + "expr": "histogram_quantile(0.90, sum(rate(nginx_ingress_controller_request_duration_seconds_bucket{ingress!=\"\",controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (le, ingress))", + "format": "table", + "hide": false, + "instant": true, + "intervalFactor": 1, + "legendFormat": "{{ ingress }}", + "refId": "D" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(nginx_ingress_controller_request_duration_seconds_bucket{ingress!=\"\",controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (le, ingress))", + "format": "table", + "hide": false, + "instant": true, + "intervalFactor": 1, + "legendFormat": "{{ destination_service }}", + "refId": "E" + }, + { + "expr": "sum(irate(nginx_ingress_controller_request_size_sum{ingress!=\"\",controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (ingress)", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ ingress }}", + "refId": "F" + }, 
+ { + "expr": "sum(irate(nginx_ingress_controller_response_size_sum{ingress!=\"\",controller_pod=~\"$controller\",controller_class=~\"$controller_class\",controller_namespace=~\"$namespace\",ingress=~\"$ingress\"}[2m])) by (ingress)", + "format": "table", + "instant": true, + "intervalFactor": 1, + "legendFormat": "{{ ingress }}", + "refId": "G" + } + ], + "timeFrom": null, + "title": "Ingress Percentile Response Times and Transfer Rates", + "transform": "table", + "transparent": false, + "type": "table" + }, + { + "columns": [ + { + "text": "Current", + "value": "current" + } + ], + "datasource": "${DS_PROMETHEUS}", + "fontSize": "100%", + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 24 + }, + "height": "1024", + "id": 85, + "links": [], + "pageSize": 7, + "scroll": true, + "showHeader": true, + "sort": { + "col": 1, + "desc": false + }, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "date" + }, + { + "alias": "TTL", + "colorMode": "cell", + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "pattern": "Current", + "thresholds": [ + "0", + "691200" + ], + "type": "number", + "unit": "s" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "decimals": 2, + "pattern": "/.*/", + "thresholds": [], + "type": "number", + "unit": "short" + } + ], + "targets": [ + { + "expr": "avg(nginx_ingress_controller_ssl_expire_time_seconds{kubernetes_pod_name=~\"$controller\",namespace=~\"$namespace\",ingress=~\"$ingress\"}) by (host) - time()", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{ host }}", + "metric": "gke_letsencrypt_cert_expiration", + "refId": "A", + "step": 1 + } + ], + "title": "Ingress Certificate Expiry", + "transform": "timeseries_aggregations", + "type": "table" + } + 
], + "refresh": "5s", + "schemaVersion": 16, + "style": "dark", + "tags": [ + "nginx" + ], + "templating": { + "list": [ + { + "hide": 0, + "label": "datasource", + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": ".*", + "current": { + "text": "All", + "value": "$__all" + }, + "datasource": "${DS_PROMETHEUS}", + "hide": 0, + "includeAll": true, + "label": "Namespace", + "multi": false, + "name": "namespace", + "options": [], + "query": "label_values(nginx_ingress_controller_config_hash, controller_namespace)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "text": "All", + "value": "$__all" + }, + "datasource": "${DS_PROMETHEUS}", + "hide": 0, + "includeAll": true, + "label": "Controller Class", + "multi": false, + "name": "controller_class", + "options": [], + "query": "label_values(nginx_ingress_controller_config_hash{namespace=~\"$namespace\"}, controller_class) ", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "text": "All", + "value": "$__all" + }, + "datasource": "${DS_PROMETHEUS}", + "hide": 0, + "includeAll": true, + "label": "Controller", + "multi": false, + "name": "controller", + "options": [], + "query": "label_values(nginx_ingress_controller_config_hash{namespace=~\"$namespace\",controller_class=~\"$controller_class\"}, controller_pod) ", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".*", + "current": { + "tags": [], + "text": "All", + "value": "$__all" + }, + "datasource": "${DS_PROMETHEUS}", + "hide": 0, + "includeAll": true, + "label": "Ingress", + "multi": false, + 
"name": "ingress", + "options": [], + "query": "label_values(nginx_ingress_controller_requests{namespace=~\"$namespace\",controller_class=~\"$controller_class\",controller=~\"$controller\"}, ingress) ", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "2m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "NGINX / Ingress Controller", + "uid": "nginx", + "version": 1 +} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/ingress-nginx/request-handling-performance.json b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/ingress-nginx/request-handling-performance.json new file mode 100755 index 000000000..5635ae976 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/ingress-nginx/request-handling-performance.json @@ -0,0 +1,981 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "6.6.0" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "", + "editable": true, + "gnetId": 9614, + "graphTooltip": 1, + "id": null, + "iteration": 1582146566338, + 
"links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "description": "Total time taken for nginx and upstream servers to process a request and send a response", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 91, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(\n 0.5,\n sum by (le)(\n rate(\n nginx_ingress_controller_request_duration_seconds_bucket{\n ingress =~ \"$ingress\"\n }[1m]\n )\n )\n)", + "interval": "", + "legendFormat": ".5", + "refId": "D" + }, + { + "expr": "histogram_quantile(\n 0.95,\n sum by (le)(\n rate(\n nginx_ingress_controller_request_duration_seconds_bucket{\n ingress =~ \"$ingress\"\n }[1m]\n )\n )\n)", + "interval": "", + "legendFormat": ".95", + "refId": "B" + }, + { + "expr": "histogram_quantile(\n 0.99,\n sum by (le)(\n rate(\n nginx_ingress_controller_request_duration_seconds_bucket{\n ingress =~ \"$ingress\"\n }[1m]\n )\n )\n)", + "interval": "", + "legendFormat": ".99", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Total request handling time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": 
null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "description": "The time spent on receiving the response from the upstream server", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "hiddenSeries": false, + "id": 94, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(\n 0.5,\n sum by (le)(\n rate(\n nginx_ingress_controller_response_duration_seconds_bucket{\n ingress =~ \"$ingress\"\n }[1m]\n )\n )\n)", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": ".5", + "refId": "D" + }, + { + "expr": "histogram_quantile(\n 0.95,\n sum by (le)(\n rate(\n nginx_ingress_controller_response_duration_seconds_bucket{\n ingress =~ \"$ingress\"\n }[1m]\n )\n )\n)", + "interval": "", + "legendFormat": ".95", + "refId": "B" + }, + { + "expr": "histogram_quantile(\n 0.99,\n sum by (le)(\n rate(\n nginx_ingress_controller_response_duration_seconds_bucket{\n ingress =~ \"$ingress\"\n }[1m]\n )\n )\n)", + "interval": "", + "legendFormat": ".99", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Upstream response time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + 
"format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "hiddenSeries": false, + "id": 93, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": " sum by (path)(\n rate(\n nginx_ingress_controller_request_duration_seconds_count{\n ingress =~ \"$ingress\"\n }[1m]\n )\n )\n", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ path }}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Request volume by Path", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "reqps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "description": "For each path 
observed, its median upstream response time", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "hiddenSeries": false, + "id": 98, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(\n .5,\n sum by (le, path)(\n rate(\n nginx_ingress_controller_response_duration_seconds_bucket{\n ingress =~ \"$ingress\"\n }[1m]\n )\n )\n)", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ path }}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Median upstream response time by Path", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "description": "Percentage of 4xx and 5xx responses among all responses.", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 16 + }, + "hiddenSeries": false, + "id": 100, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": 
true, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null as zero", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (path) (rate(nginx_ingress_controller_request_duration_seconds_count{\n ingress = \"$ingress\",\n status =~ \"[4-5].*\"\n}[1m])) / sum by (path) (rate(nginx_ingress_controller_request_duration_seconds_count{\n ingress = \"$ingress\",\n}[1m]))", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ path }}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Response error rate by Path", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "description": "For each path observed, the sum of upstream request time", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "hiddenSeries": false, + "id": 102, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 
2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (path) (rate(nginx_ingress_controller_response_duration_seconds_sum{ingress = \"$ingress\"}[1m]))", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ path }}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Upstream time consumed by Path", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 24 + }, + "hiddenSeries": false, + "id": 101, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": " sum (\n rate(\n nginx_ingress_controller_request_duration_seconds_count{\n ingress =~ \"$ingress\",\n status =~\"[4-5].*\",\n }[1m]\n )\n ) by(path, status)\n", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ path }} {{ status }}", + "refId": "A" + } + ], + 
"thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Response error volume by Path", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "reqps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 24 + }, + "hiddenSeries": false, + "id": 99, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (\n rate (\n nginx_ingress_controller_response_size_sum {\n ingress =~ \"$ingress\",\n }[1m]\n )\n) by (path) / sum (\n rate(\n nginx_ingress_controller_response_size_count {\n ingress =~ \"$ingress\",\n }[1m]\n )\n) by (path)\n", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ path }}", + "refId": "D" + }, + { + "expr": " sum (rate(nginx_ingress_controller_response_size_bucket{\n namespace =~ \"$namespace\",\n ingress =~ \"$ingress\",\n }[1m])) by (le)\n", + "hide": true, + "legendFormat": "{{le}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": 
[], + "timeShift": null, + "title": "Average response size by Path", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 32 + }, + "hiddenSeries": false, + "id": 96, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (\n rate(\n nginx_ingress_controller_ingress_upstream_latency_seconds_sum {\n ingress =~ \"$ingress\",\n }[1m]\n)) / sum (\n rate(\n nginx_ingress_controller_ingress_upstream_latency_seconds_count {\n ingress =~ \"$ingress\",\n }[1m]\n )\n)\n", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "average", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Upstream service latency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + 
"label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "30s", + "schemaVersion": 22, + "style": "dark", + "tags": [ + "nginx" + ], + "templating": { + "list": [ + { + "hide": 0, + "label": "datasource", + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": ".*", + "current": {}, + "datasource": "${DS_PROMETHEUS}", + "definition": "label_values(nginx_ingress_controller_requests, ingress) ", + "hide": 0, + "includeAll": true, + "label": "Service Ingress", + "multi": false, + "name": "ingress", + "options": [], + "query": "label_values(nginx_ingress_controller_requests, ingress) ", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "2m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "NGINX / Request Handling Performance", + "uid": "4GFbkOsZk", + "version": 1 + } diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/cluster/rancher-cluster-nodes.json b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/cluster/rancher-cluster-nodes.json new file mode 100755 index 000000000..b8c1ab7e6 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/cluster/rancher-cluster-nodes.json @@ -0,0 +1,776 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": 
true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 28, + "links": [], + "panels": [ + { + "aliasColors": { + "{{instance}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 - avg(irate({__name__=~\"node_cpu_seconds_total|windows_cpu_time_total\",mode=\"idle\"}[$__rate_interval])) by (instance)", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{instance}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": 
{} + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Load[5m] ({{instance}})" + }, + "properties": [] + } + ] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 0 + }, + "hiddenSeries": false, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(node_load5 OR avg_over_time(windows_system_processor_queue_length[5m])) by (instance)", + "interval": "", + "legendFormat": "Load[5m] ({{instance}})", + "refId": "A" + }, + { + "expr": "sum(node_load1 OR avg_over_time(windows_system_processor_queue_length[1m])) by (instance)", + "interval": "", + "legendFormat": "Load[1m] ({{instance}})", + "refId": "B" + }, + { + "expr": "sum(node_load15 OR avg_over_time(windows_system_processor_queue_length[15m])) by (instance)", + "interval": "", + "legendFormat": "Load[15m] ({{instance}})", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Load Average", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{instance}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, 
+ "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 - sum(node_memory_MemAvailable_bytes OR windows_os_physical_memory_free_bytes) by (instance) / sum(node_memory_MemTotal_bytes OR windows_cs_physical_memory_bytes) by (instance) ", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{instance}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 7 + }, + "hiddenSeries": false, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": 
false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 - (sum(node_filesystem_free_bytes{device!~\"rootfs|HarddiskVolume.+\"} OR windows_logical_disk_free_bytes{volume!~\"(HarddiskVolume.+|[A-Z]:.+)\"}) by (instance) / sum(node_filesystem_size_bytes{device!~\"rootfs|HarddiskVolume.+\"} OR windows_logical_disk_size_bytes{volume!~\"(HarddiskVolume.+|[A-Z]:.+)\"}) by (instance))", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{instance}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 7 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + 
"seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(node_disk_read_bytes_total[$__rate_interval]) OR rate(windows_logical_disk_read_bytes_total[$__rate_interval])) by (instance)", + "interval": "", + "legendFormat": "Read ({{instance}})", + "refId": "A" + }, + { + "expr": "sum(rate(node_disk_written_bytes_total[$__rate_interval]) OR rate(windows_logical_disk_write_bytes_total[$__rate_interval])) by (instance)", + "interval": "", + "legendFormat": "Write ({{instance}})", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{instance}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 7 + }, + "hiddenSeries": false, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"sum(rate(node_network_receive_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) by (instance) OR sum(rate(windows_net_packets_received_errors{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) by (instance)", + "interval": "", + "legendFormat": "Receive Errors ({{instance}})", + "refId": "A" + }, + { + "expr": "sum(rate(node_network_receive_packets_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) by (instance) OR sum(rate(windows_net_packets_received_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) by (instance)", + "interval": "", + "legendFormat": "Receive Total ({{instance}})", + "refId": "B" + }, + { + "expr": "sum(rate(node_network_transmit_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) by (instance) OR sum(rate(windows_net_packets_outbound_errors{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) by (instance)", + "interval": "", + "legendFormat": "Transmit Errors ({{instance}})", + "refId": "C" + }, + { + "expr": "sum(rate(node_network_receive_drop_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) by (instance) OR sum(rate(windows_net_packets_received_discarded{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) by (instance)", + "interval": "", + "legendFormat": "Receive Dropped ({{instance}})", + "refId": "D" + }, + { + "expr": "sum(rate(node_network_transmit_drop_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) by (instance) OR sum(rate(windows_net_packets_outbound_discarded{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) by (instance)", + "interval": "", + "legendFormat": "Transmit Dropped ({{instance}})", + "refId": "E" + }, + { + "expr": "sum(rate(node_network_transmit_packets_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) by (instance) 
OR sum(rate(windows_net_packets_sent_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) by (instance)", + "interval": "", + "legendFormat": "Transmit Total ({{instance}})", + "refId": "F" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network Traffic", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{instance}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 14 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(node_network_transmit_bytes_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval]) OR rate(windows_net_packets_sent_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) by (instance)", + "interval": "", + "legendFormat": "Transmit Total ({{instance}})", + "refId": "A" + }, + { + "expr": 
"sum(rate(node_network_receive_bytes_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval]) OR rate(windows_net_packets_received_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) by (instance)", + "interval": "", + "legendFormat": "Receive Total ({{instance}})", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Rancher / Cluster (Nodes)", + "uid": "rancher-cluster-nodes-1", + "version": 3 +} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/cluster/rancher-cluster.json b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/cluster/rancher-cluster.json new file mode 100755 index 000000000..29cc91675 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/cluster/rancher-cluster.json @@ -0,0 +1,759 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + 
"gnetId": null, + "graphTooltip": 0, + "id": 28, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 - avg(irate({__name__=~\"node_cpu_seconds_total|windows_cpu_time_total\",mode=\"idle\"}[$__rate_interval]))", + "legendFormat": "Total", + "interval": "", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Load[5m]" + }, + "properties": [] + } + ] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 0 + }, + 
"hiddenSeries": false, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(node_load5 OR avg_over_time(windows_system_processor_queue_length[5m]))", + "interval": "", + "legendFormat": "Load[5m]", + "refId": "A" + }, + { + "expr": "sum(node_load1 OR avg_over_time(windows_system_processor_queue_length[1m]))", + "interval": "", + "legendFormat": "Load[1m]", + "refId": "B" + }, + { + "expr": "sum(node_load15 OR avg_over_time(windows_system_processor_queue_length[15m]))", + "interval": "", + "legendFormat": "Load[15m]", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Load Average", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + 
"lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 - sum(node_memory_MemAvailable_bytes OR windows_os_physical_memory_free_bytes) / sum(node_memory_MemTotal_bytes OR windows_cs_physical_memory_bytes)", + "legendFormat": "Total", + "interval": "", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 7 + }, + "hiddenSeries": false, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 - (sum(node_filesystem_free_bytes{device!~\"rootfs|HarddiskVolume.+\"} OR 
windows_logical_disk_free_bytes{volume!~\"(HarddiskVolume.+|[A-Z]:.+)\"}) / sum(node_filesystem_size_bytes{device!~\"rootfs|HarddiskVolume.+\"} OR windows_logical_disk_size_bytes{volume!~\"(HarddiskVolume.+|[A-Z]:.+)\"}))", + "legendFormat": "Total", + "interval": "", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 7 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(node_disk_read_bytes_total[$__rate_interval]) OR rate(windows_logical_disk_read_bytes_total[$__rate_interval]))", + "interval": "", + "legendFormat": "Read", + "refId": "A" + }, + { + "expr": "sum(rate(node_disk_written_bytes_total[$__rate_interval]) OR rate(windows_logical_disk_write_bytes_total[$__rate_interval]))", + "interval": "", + 
"legendFormat": "Write", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 7 + }, + "hiddenSeries": false, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(rate(node_network_receive_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_received_errors{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) OR on() vector(0))", + "interval": "", + "legendFormat": "Receive Errors", + "refId": "A" + }, + { + "expr": "(sum(rate(node_network_receive_packets_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) OR on() vector(0)) + 
(sum(rate(windows_net_packets_received_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) OR on() vector(0))", + "interval": "", + "legendFormat": "Receive Total", + "refId": "B" + }, + { + "expr": "(sum(rate(node_network_transmit_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_outbound_errors{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) OR on() vector(0))", + "interval": "", + "legendFormat": "Transmit Errors", + "refId": "C" + }, + { + "expr": "(sum(rate(node_network_receive_drop_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_received_discarded{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) OR on() vector(0))", + "interval": "", + "legendFormat": "Receive Dropped", + "refId": "D" + }, + { + "expr": "(sum(rate(node_network_transmit_drop_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_outbound_discarded{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) OR on() vector(0))", + "interval": "", + "legendFormat": "Transmit Dropped", + "refId": "E" + }, + { + "expr": "(sum(rate(node_network_transmit_packets_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_sent_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval])) OR on() vector(0))", + "interval": "", + "legendFormat": "Transmit Total", + "refId": "F" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network Traffic", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + 
}, + "yaxes": [ + { + "decimals": 1, + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 14 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(node_network_transmit_bytes_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval]) OR rate(windows_net_packets_sent_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval]))", + "interval": "", + "legendFormat": "Transmit Total", + "refId": "A" + }, + { + "expr": "sum(rate(node_network_receive_bytes_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\"}[$__rate_interval]) OR rate(windows_net_packets_received_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*'}[$__rate_interval]))", + "interval": "", + "legendFormat": "Receive Total", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + 
"decimals": 1, + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Rancher / Cluster", + "uid": "rancher-cluster-1", + "version": 3 +} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/home/rancher-default-home.json b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/home/rancher-default-home.json new file mode 100755 index 000000000..86829f012 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/home/rancher-default-home.json @@ -0,0 +1,1273 @@ +{ + "annotations": { + "list": [] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "links": [], + "panels": [ + { + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 1, + "title": "", + "type": "welcome" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "Prometheus", + "decimals": 2, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 0, + "y": 4 + }, + "height": 
"180px", + "id": 6, + "interval": null, + "isNew": true, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "(1 - (avg(irate({__name__=~\"node_cpu_seconds_total|windows_cpu_time_total\",mode=\"idle\"}[5m])))) * 100", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "65, 90", + "title": "CPU Utilization", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "Prometheus", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 8, + "y": 4 + }, + "height": "180px", + "id": 4, + "interval": null, + "isNew": true, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + 
"rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "(1 - sum({__name__=~\"node_memory_MemAvailable_bytes|windows_os_physical_memory_free_bytes\"}) / sum({__name__=~\"node_memory_MemTotal_bytes|windows_cs_physical_memory_bytes\"})) * 100", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "65, 90", + "title": "Memory Utilization", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": true, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "Prometheus", + "decimals": 2, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 5, + "w": 8, + "x": 16, + "y": 4 + }, + "height": "180px", + "id": 7, + "interval": null, + "isNew": true, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "1 - 
(((sum(node_filesystem_free_bytes{device!~\"rootfs|HarddiskVolume.+\"}) OR on() vector(0)) + (sum(windows_logical_disk_free_bytes{volume!~\"(HarddiskVolume.+|[A-Z]:.+)\"}) OR on() vector(0))) / ((sum(node_filesystem_size_bytes{device!~\"rootfs|HarddiskVolume.+\"}) OR on() vector(0)) + (sum(windows_logical_disk_size_bytes{volume!~\"(HarddiskVolume.+|[A-Z]:.+)\"}) OR on() vector(0))))", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "metric": "", + "refId": "A", + "step": 10 + } + ], + "thresholds": "65, 90", + "title": "Disk Utilization", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "Prometheus", + "decimals": 2, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 0, + "y": 9 + }, + "height": "1px", + "id": 11, + "interval": null, + "isNew": true, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": " cores", + "postfixFontSize": "30%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": 
"sum(irate({__name__=~\"node_cpu_seconds_total|windows_cpu_time_total\",mode!=\"idle\"}[5m]))", + "format": "time_series", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "", + "title": "CPU Used", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "Prometheus", + "decimals": 2, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 4, + "y": 9 + }, + "height": "1px", + "id": 12, + "interval": null, + "isNew": true, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": " cores", + "postfixFontSize": "30%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(kube_node_status_allocatable_cpu_cores{})", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "", + "title": "CPU Total", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": 
false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "Prometheus", + "decimals": 2, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 8, + "y": 9 + }, + "height": "1px", + "id": 9, + "interval": null, + "isNew": true, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "20%", + "prefix": "", + "prefixFontSize": "20%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum({__name__=~\"node_memory_MemTotal_bytes|windows_cs_physical_memory_bytes\"}) - sum({__name__=~\"node_memory_MemAvailable_bytes|windows_os_physical_memory_free_bytes\"})", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "", + "title": "Memory Used", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "Prometheus", + "decimals": 2, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "bytes", + "gauge": { + 
"maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 12, + "y": 9 + }, + "height": "1px", + "id": 10, + "interval": null, + "isNew": true, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum (kube_node_status_allocatable_memory_bytes{})", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "", + "title": "Memory Total", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "Prometheus", + "decimals": 2, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 16, + "y": 9 + }, + "height": "1px", + "id": 13, + "interval": null, + "isNew": true, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + 
"nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "(sum(node_filesystem_size_bytes{device!~\"rootfs|HarddiskVolume.+\"}) - sum(node_filesystem_free_bytes{device!~\"rootfs|HarddiskVolume.+\"}) OR on() vector(0)) + (sum(windows_logical_disk_size_bytes{volume!~\"(HarddiskVolume.+|[A-Z]:.+)\"}) - sum(windows_logical_disk_free_bytes{volume!~\"(HarddiskVolume.+|[A-Z]:.+)\"}) OR on() vector(0))", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "", + "title": "Disk Used", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "Prometheus", + "decimals": 2, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 20, + "y": 9 + }, + "height": "1px", + "id": 14, + "interval": null, + "isNew": true, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + 
"to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "(sum(node_filesystem_size_bytes{device!~\"rootfs|HarddiskVolume.+\"}) OR on() vector(0)) + (sum(windows_logical_disk_size_bytes{volume!~\"(HarddiskVolume.+|[A-Z]:.+)\"}) OR on() vector(0))", + "interval": "10s", + "intervalFactor": 1, + "refId": "A", + "step": 10 + } + ], + "thresholds": "", + "title": "Disk Total", + "type": "singlestat", + "valueFontSize": "50%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 8, + "x": 0, + "y": 12 + }, + "hiddenSeries": false, + "id": 2051, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 - (avg(irate({__name__=~\"node_cpu_seconds_total|windows_cpu_time_total\",mode=\"idle\"}[$__rate_interval])))", + "format": "time_series", + "hide": false, + "instant": false, + "intervalFactor": 1, + "legendFormat": "Cluster", + "refId": "A" + }, + { + "expr": "1 - avg(irate({__name__=~\"node_cpu_seconds_total|windows_cpu_time_total\", mode=\"idle\"}[$__rate_interval])) by (instance)", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "{{ instance }}", + "refId": "B" + } + ], + "thresholds": 
[], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percentunit", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 8, + "x": 8, + "y": 12 + }, + "hiddenSeries": false, + "id": 2052, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "100 * (1 - sum({__name__=~\"node_memory_MemAvailable_bytes|windows_os_physical_memory_free_bytes\"}) / sum({__name__=~\"node_memory_MemTotal_bytes|windows_cs_physical_memory_bytes\"}))", + "format": "time_series", + "hide": false, + "instant": false, + "intervalFactor": 1, + "legendFormat": "Cluster", + "refId": "A" + }, + { + "expr": "100 * (1- sum({__name__=~\"node_memory_MemAvailable_bytes|windows_os_physical_memory_free_bytes\"}) by (instance) / sum({__name__=~\"node_memory_MemTotal_bytes|windows_cs_physical_memory_bytes\"}) by (instance))", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + 
"legendFormat": "{{ instance }}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 8, + "x": 16, + "y": 12 + }, + "hiddenSeries": false, + "id": 2053, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(1 - ((sum(node_filesystem_free_bytes{device!~\"rootfs|HarddiskVolume.+\"}) OR on() vector(0)) + (sum(windows_logical_disk_free_bytes{volume!~\"(HarddiskVolume.+|[A-Z]:.+)\"} OR on() vector(0)))) / ((sum(node_filesystem_size_bytes{device!~\"rootfs|HarddiskVolume.+\"}) OR on() vector(0)) + (sum(windows_logical_disk_size_bytes{volume!~\"(HarddiskVolume.+|[A-Z]:.+)\"}) OR on() vector(0)))) * 100", + "legendFormat": "Cluster", + "refId": "A" + }, + { + "expr": "(1 - 
(sum(node_filesystem_free_bytes{device!~\"rootfs|HarddiskVolume.+\"}) by (instance)) / sum(node_filesystem_size_bytes{device!~\"rootfs|HarddiskVolume.+\"}) by (instance)) * 100", + "hide": false, + "legendFormat": "{{ instance }}", + "refId": "B" + }, + { + "expr": "(1 - (sum(windows_logical_disk_free_bytes{volume!~\"(HarddiskVolume.+|[A-Z]:.+)\"}) by (instance)) / sum(windows_logical_disk_size_bytes{volume!~\"(HarddiskVolume.+|[A-Z]:.+)\"}) by (instance)) * 100", + "hide": false, + "legendFormat": "{{ instance }}", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "percent", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "folderId": 0, + "gridPos": { + "h": 15, + "w": 12, + "x": 0, + "y": 18 + }, + "headings": true, + "id": 3, + "limit": 30, + "links": [], + "query": "", + "recent": true, + "search": true, + "starred": false, + "tags": [], + "title": "Dashboards", + "type": "dashlist" + }, + { + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 18 + }, + "id": 2055, + "options": { + "content": "## About Rancher Monitoring\n\nRancher Monitoring is a Helm chart developed by Rancher that is powered by [Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator). 
It is based on the upstream [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) Helm chart maintained by the Prometheus community.\n\nBy default, the chart deploys Grafana alongside a set of Grafana dashboards curated by the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus) project.\n\nFor more information on how Rancher Monitoring differs from [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack), please view the CHANGELOG.md of the rancher-monitoring chart located in the [rancher/charts](https://github.com/rancher/charts) repository.\n\nFor more information about how to configure Rancher Monitoring, please view the [Rancher docs](https://rancher.com/docs/rancher/v2.x/en/).\n\n", + "mode": "markdown" + }, + "pluginVersion": "7.1.0", + "timeFrom": null, + "timeShift": null, + "title": "", + "type": "text" + } + ], + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "hidden": true, + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ], + "type": "timepicker" + }, + "timezone": "browser", + "title": "Home", + "uid": "rancher-home-1", + "version": 5 +} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/k8s/rancher-etcd-nodes.json b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/k8s/rancher-etcd-nodes.json new file mode 100755 index 000000000..300b61276 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/k8s/rancher-etcd-nodes.json @@ -0,0 +1,670 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + 
"iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 32, + "links": [], + "panels": [ + { + "aliasColors": { + "{{instance}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(etcd_network_client_grpc_received_bytes_total{job=\"kube-etcd\"}[$__rate_interval])) by (instance)", + "interval": "", + "legendFormat": "Client Traffic In ({{instance}})", + "refId": "A" + }, + { + "expr": "sum(rate(etcd_network_client_grpc_sent_bytes_total{job=\"kube-etcd\"}[$__rate_interval])) by (instance)", + "interval": "", + "legendFormat": "Client Traffic Out ({{instance}})", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "GRPC Client Traffic", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": 
false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{instance}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Load[5m]({{instance}})" + }, + "properties": [] + } + ] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 0 + }, + "hiddenSeries": false, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(etcd_debugging_mvcc_db_total_size_in_bytes) by (instance)", + "interval": "", + "legendFormat": "DB Size ({{instance}})", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "DB Size", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{instance}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "hiddenSeries": false, + 
"id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(grpc_server_started_total{grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"}) by (instance) - sum(grpc_server_handled_total{grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"}) by (instance)", + "interval": "", + "legendFormat": "Watch Streams ({{instance}})", + "refId": "A" + }, + { + "expr": "sum(grpc_server_started_total{grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"}) by (instance) - sum(grpc_server_handled_total{grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"}) by (instance)", + "interval": "", + "legendFormat": "Lease Watch Stream ({{instance}})", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Active Streams", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{instance}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + 
"y": 7 + }, + "hiddenSeries": false, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(etcd_server_proposals_committed_total[$__rate_interval])) by (instance)", + "interval": "", + "legendFormat": "Proposal Committed ({{instance}})", + "refId": "A" + }, + { + "expr": "sum(rate(etcd_server_proposals_applied_total[$__rate_interval])) by (instance)", + "interval": "", + "legendFormat": "Proposal Applied ({{instance}})", + "refId": "B" + }, + { + "expr": "sum(rate(etcd_server_proposals_failed_total[$__rate_interval])) by (instance)", + "interval": "", + "legendFormat": "Proposal Failed ({{instance}})", + "refId": "C" + }, + { + "expr": "sum(etcd_server_proposals_pending) by (instance)", + "interval": "", + "legendFormat": "Proposal Pending ({{instance}})", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Raft Proposals", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{instance}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + 
}, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 7 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(grpc_server_started_total{grpc_type=\"unary\"}[$__rate_interval])) by (instance)", + "interval": "", + "legendFormat": "RPC Rate ({{instance}})", + "refId": "A" + }, + { + "expr": "sum(rate(grpc_server_handled_total{grpc_type=\"unary\",grpc_code!=\"OK\"}[$__rate_interval])) by (instance)", + "interval": "", + "legendFormat": "RPC Failure Rate ({{instance}})", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "RPC Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{instance}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 7 + }, + "hiddenSeries": false, + "id": 7, + "legend": { + "avg": false, + 
"current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[$__rate_interval])) by (instance, le))", + "interval": "", + "legendFormat": "WAL fsync ({{instance}})", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[$__rate_interval])) by (instance, le))", + "interval": "", + "legendFormat": "DB fsync ({{instance}})", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk Sync Duration", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 2, + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Rancher / etcd (Nodes)", + "uid": "rancher-etcd-nodes-1", + "version": 5 +} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/k8s/rancher-etcd.json 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/k8s/rancher-etcd.json new file mode 100755 index 000000000..d58e23bce --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/k8s/rancher-etcd.json @@ -0,0 +1,652 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 33, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(etcd_network_client_grpc_received_bytes_total{job=\"kube-etcd\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Client Traffic In", + "refId": "A" + }, + { + "expr": "sum(rate(etcd_network_client_grpc_sent_bytes_total{job=\"kube-etcd\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Client Traffic Out", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "GRPC Client Traffic", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": 
true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [ + { + "properties": [] + } + ] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 0 + }, + "hiddenSeries": false, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(etcd_debugging_mvcc_db_total_size_in_bytes)", + "interval": "", + "legendFormat": "DB Size", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "DB Size", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + 
"overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(grpc_server_started_total{grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"})", + "interval": "", + "legendFormat": "Watch Streams", + "refId": "A" + }, + { + "expr": "sum(grpc_server_started_total{grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"})", + "interval": "", + "legendFormat": "Lease Watch Stream", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Active Streams", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, 
+ "w": 8, + "x": 0, + "y": 7 + }, + "hiddenSeries": false, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(etcd_server_proposals_committed_total[$__rate_interval]))", + "interval": "", + "legendFormat": "Proposal Committed", + "refId": "A" + }, + { + "expr": "sum(rate(etcd_server_proposals_applied_total[$__rate_interval]))", + "interval": "", + "legendFormat": "Proposal Applied", + "refId": "B" + }, + { + "expr": "sum(rate(etcd_server_proposals_failed_total[$__rate_interval]))", + "interval": "", + "legendFormat": "Proposal Failed", + "refId": "C" + }, + { + "expr": "sum(etcd_server_proposals_pending)", + "interval": "", + "legendFormat": "Proposal Pending", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Raft Proposals", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 7 + }, + "hiddenSeries": false, + 
"id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(grpc_server_started_total{grpc_type=\"unary\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "RPC Rate", + "refId": "A" + }, + { + "expr": "sum(rate(grpc_server_handled_total{grpc_type=\"unary\",grpc_code!=\"OK\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "RPC Failure Rate", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "RPC Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 7 + }, + "hiddenSeries": false, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + 
"renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket[$__rate_interval])) by (instance, le))", + "interval": "", + "legendFormat": "WAL fsync", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket[$__rate_interval])) by (instance, le))", + "interval": "", + "legendFormat": "DB fsync", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk Sync Duration", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 2, + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Rancher / etcd", + "uid": "rancher-etcd-1", + "version": 4 +} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/k8s/rancher-k8s-components-nodes.json b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/k8s/rancher-k8s-components-nodes.json new file mode 100755 index 000000000..9de59be49 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/k8s/rancher-k8s-components-nodes.json @@ -0,0 +1,510 @@ +{ + "annotations": { + "list": [ + { + 
"builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 30, + "links": [], + "panels": [ + { + "aliasColors": { + "{{instance}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(apiserver_request_total[$__rate_interval])) by (instance, code)", + "interval": "", + "legendFormat": "{{code}}({{instance}})", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "API Server Request Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{instance}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + 
"fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Load[5m]({{instance}})" + }, + "properties": [] + } + ] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 0 + }, + "hiddenSeries": false, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(workqueue_depth{component=\"kube-controller-manager\", name=\"deployment\"}) by (instance, name)", + "interval": "", + "legendFormat": "Deployment Depth ({{instance}})", + "refId": "A" + }, + { + "expr": "sum(workqueue_depth{component=\"kube-controller-manager\", name=\"volumes\"}) by (instance, name)", + "interval": "", + "legendFormat": "Volumes Depth ({{instance}})", + "refId": "B" + }, + { + "expr": "sum(workqueue_depth{component=\"kube-controller-manager\", name=\"replicaset\"}) by (instance, name)", + "interval": "", + "legendFormat": "ReplicaSet Depth ({{instance}})", + "refId": "C" + }, + { + "expr": "sum(workqueue_depth{component=\"kube-controller-manager\", name=\"service\"}) by (instance, name)", + "interval": "", + "legendFormat": "Service Depth ({{instance}})", + "refId": "D" + }, + { + "expr": "sum(workqueue_depth{component=\"kube-controller-manager\", name=\"serviceaccount\"}) by (instance, name)", + "interval": "", + "legendFormat": "ServiceAccount Depth ({{instance}})", + "refId": "E" + }, + { + "expr": "sum(workqueue_depth{component=\"kube-controller-manager\", name=\"endpoint\"}) by (instance, name)", + "interval": "", + "legendFormat": "Endpoint Depth ({{instance}})", + "refId": "F" + }, + { + "expr": 
"sum(workqueue_depth{component=\"kube-controller-manager\", name=\"daemonset\"}) by (instance, name)", + "interval": "", + "legendFormat": "DaemonSet Depth ({{instance}})", + "refId": "G" + }, + { + "expr": "sum(workqueue_depth{component=\"kube-controller-manager\", name=\"statefulset\"}) by (instance, name)", + "interval": "", + "legendFormat": "StatefulSet Depth ({{instance}})", + "refId": "H" + }, + { + "expr": "sum(workqueue_depth{component=\"kube-controller-manager\", name=\"replicationmanager\"}) by (instance, name)", + "interval": "", + "legendFormat": "ReplicationManager Depth ({{instance}})", + "refId": "I" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Controller Manager Queue Depth", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{instance}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + 
"steppedLine": false, + "targets": [ + { + "expr": "sum(kube_pod_status_scheduled{condition=\"false\"})", + "interval": "", + "legendFormat": "Failed To Schedule", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Pod Scheduling Status", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{instance}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 7 + }, + "hiddenSeries": false, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(nginx_ingress_controller_nginx_process_connections{state=\"reading\"}) by (instance)", + "interval": "", + "legendFormat": "Reading ({{instance}})", + "refId": "A" + }, + { + "expr": "sum(nginx_ingress_controller_nginx_process_connections{state=\"waiting\"}) by (instance)", + "interval": "", + "legendFormat": "Waiting ({{instance}})", + "refId": "B" + }, + { + "expr": 
"sum(nginx_ingress_controller_nginx_process_connections{state=\"writing\"}) by (instance)", + "interval": "", + "legendFormat": "Writing ({{instance}})", + "refId": "C" + }, + { + "expr": "sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state=\"accepted\"}[$__rate_interval]))) by (instance)", + "interval": "", + "legendFormat": "Accepted ({{instance}})", + "refId": "D" + }, + { + "expr": "sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state=\"handled\"}[$__rate_interval]))) by (instance)", + "interval": "", + "legendFormat": "Handled ({{instance}})", + "refId": "E" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Ingress Controller Connections", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Rancher / Kubernetes Components (Nodes)", + "uid": "rancher-k8s-components-nodes-1", + "version": 5 +} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/k8s/rancher-k8s-components.json b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/k8s/rancher-k8s-components.json new file mode 100755 index 000000000..ddb0caca5 --- /dev/null +++ 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/k8s/rancher-k8s-components.json @@ -0,0 +1,502 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 31, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(apiserver_request_total[$__rate_interval])) by (code)", + "interval": "", + "legendFormat": "{{code}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "API Server Request Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": 
false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Load[5m]({{instance}})" + }, + "properties": [] + } + ] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 0 + }, + "hiddenSeries": false, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(workqueue_depth{component=\"kube-controller-manager\", name=\"deployment\"}) by (name)", + "interval": "", + "legendFormat": "Deployment Depth", + "refId": "A" + }, + { + "expr": "sum(workqueue_depth{component=\"kube-controller-manager\", name=\"volumes\"}) by (name)", + "interval": "", + "legendFormat": "Volumes Depth", + "refId": "B" + }, + { + "expr": "sum(workqueue_depth{component=\"kube-controller-manager\", name=\"replicaset\"}) by (name)", + "interval": "", + "legendFormat": "Replicaset Depth", + "refId": "C" + }, + { + "expr": "sum(workqueue_depth{component=\"kube-controller-manager\", name=\"service\"}) by (name)", + "interval": "", + "legendFormat": "Service Depth", + "refId": "D" + }, + { + "expr": "sum(workqueue_depth{component=\"kube-controller-manager\", name=\"serviceaccount\"}) by (name)", + "interval": "", + "legendFormat": "ServiceAccount Depth", + "refId": "E" + }, + { + "expr": "sum(workqueue_depth{component=\"kube-controller-manager\", name=\"endpoint\"}) by (name)", + "interval": "", + "legendFormat": "Endpoint Depth", + "refId": "F" + }, + { + "expr": "sum(workqueue_depth{component=\"kube-controller-manager\", name=\"daemonset\"}) by 
(name)", + "interval": "", + "legendFormat": "DaemonSet Depth", + "refId": "G" + }, + { + "expr": "sum(workqueue_depth{component=\"kube-controller-manager\", name=\"statefulset\"}) by (name)", + "interval": "", + "legendFormat": "StatefulSet Depth", + "refId": "H" + }, + { + "expr": "sum(workqueue_depth{component=\"kube-controller-manager\", name=\"replicationmanager\"}) by (name)", + "interval": "", + "legendFormat": "ReplicationManager Depth", + "refId": "I" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Controller Manager Queue Depth", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(kube_pod_status_scheduled{condition=\"false\"})", + "interval": "", + "legendFormat": "Failed To Schedule", + "refId": "A" + } + ], + 
"thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Pod Scheduling Status", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 7 + }, + "hiddenSeries": false, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(nginx_ingress_controller_nginx_process_connections{state=\"reading\"})", + "interval": "", + "legendFormat": "Reading", + "refId": "A" + }, + { + "expr": "sum(nginx_ingress_controller_nginx_process_connections{state=\"waiting\"})", + "interval": "", + "legendFormat": "Waiting", + "refId": "B" + }, + { + "expr": "sum(nginx_ingress_controller_nginx_process_connections{state=\"writing\"})", + "interval": "", + "legendFormat": "Writing", + "refId": "C" + }, + { + "expr": "sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state=\"accepted\"}[$__rate_interval])))", + "interval": "", + "legendFormat": 
"Accepted", + "refId": "D" + }, + { + "expr": "sum(ceil(increase(nginx_ingress_controller_nginx_process_connections_total{state=\"handled\"}[$__rate_interval])))", + "interval": "", + "legendFormat": "Handled", + "refId": "E" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Ingress Controller Connections", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Rancher / Kubernetes Components", + "uid": "rancher-k8s-components-1", + "version": 5 +} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/nodes/rancher-node-detail.json b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/nodes/rancher-node-detail.json new file mode 100755 index 000000000..0b57efa2e --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/nodes/rancher-node-detail.json @@ -0,0 +1,789 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 28, + "links": [], + 
"panels": [ + { + "aliasColors": { + "{{mode}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "avg(irate({__name__=~\"node_cpu_seconds_total|windows_cpu_time_total\", instance=\"$instance\"}[$__rate_interval])) by (mode)", + "interval": "", + "legendFormat": "{{mode}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Load[5m]" + }, + "properties": [] + } + ] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 0 + }, + "hiddenSeries": false, 
+ "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(node_load5{instance=~\"$instance\"} OR avg_over_time(windows_system_processor_queue_length{instance=~\"$instance\"}[5m]))", + "interval": "", + "legendFormat": "Load[5m]", + "refId": "A" + }, + { + "expr": "sum(node_load1{instance=~\"$instance\"} OR avg_over_time(windows_system_processor_queue_length{instance=~\"$instance\"}[1m]))", + "interval": "", + "legendFormat": "Load[1m]", + "refId": "B" + }, + { + "expr": "sum(node_load15{instance=~\"$instance\"} OR avg_over_time(windows_system_processor_queue_length{instance=~\"$instance\"}[15m]))", + "interval": "", + "legendFormat": "Load[15m]", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Load Average", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + 
"avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 - (node_memory_MemAvailable_bytes{instance=~\"$instance\"} OR windows_os_physical_memory_free_bytes{instance=~\"$instance\"}) / (node_memory_MemTotal_bytes{instance=~\"$instance\"} OR windows_cs_physical_memory_bytes{instance=~\"$instance\"})", + "interval": "", + "legendFormat": "Total", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{device}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 7 + }, + "hiddenSeries": false, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + 
"renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 - (sum(node_filesystem_free_bytes{device!~\"rootfs|HarddiskVolume.+\", instance=~\"$instance\"} OR windows_logical_disk_free_bytes{volume!~\"(HarddiskVolume.+|[A-Z]:.+)\", instance=~\"$instance\"}) by (device) / sum(node_filesystem_size_bytes{device!~\"rootfs|HarddiskVolume.+\", instance=~\"$instance\"} OR windows_logical_disk_size_bytes{volume!~\"(HarddiskVolume.+|[A-Z]:.+)\", instance=~\"$instance\"}) by (device))", + "interval": "", + "legendFormat": "{{device}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{device}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 7 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": 
false, + "targets": [ + { + "expr": "sum(rate(node_disk_read_bytes_total{instance=~\"$instance\"}[$__rate_interval]) OR rate(windows_logical_disk_read_bytes_total{instance=~\"$instance\"}[$__rate_interval])) by (device)", + "interval": "", + "legendFormat": "Read ({{device}})", + "refId": "A" + }, + { + "expr": "sum(rate(node_disk_written_bytes_total{instance=~\"$instance\"}[$__rate_interval]) OR rate(windows_logical_disk_write_bytes_total{instance=~\"$instance\"}[$__rate_interval])) by (device)", + "interval": "", + "legendFormat": "Write ({{device}})", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{device}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 7 + }, + "hiddenSeries": false, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"sum(rate(node_network_receive_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) by (device) OR sum(rate(windows_net_packets_received_errors{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) by (device)", + "interval": "", + "legendFormat": "Receive Errors ({{device}})", + "refId": "A" + }, + { + "expr": "sum(rate(node_network_receive_packets_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) by (device) OR sum(rate(windows_net_packets_received_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) by (device)", + "interval": "", + "legendFormat": "Receive Total ({{device}})", + "refId": "B" + }, + { + "expr": "sum(rate(node_network_transmit_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) by (device) OR sum(rate(windows_net_packets_outbound_errors{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) by (device)", + "interval": "", + "legendFormat": "Transmit Errors ({{device}})", + "refId": "C" + }, + { + "expr": "sum(rate(node_network_receive_drop_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) by (device) OR sum(rate(windows_net_packets_received_discarded{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) by (device)", + "interval": "", + "legendFormat": "Receive Dropped ({{device}})", + "refId": "D" + }, + { + "expr": "sum(rate(node_network_transmit_drop_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) by (device) OR sum(rate(windows_net_packets_outbound_discarded{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) by (device)", + "interval": "", + 
"legendFormat": "Transmit Dropped ({{device}})", + "refId": "E" + }, + { + "expr": "sum(rate(node_network_transmit_packets_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) by (device) OR sum(rate(windows_net_packets_sent_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) by (device)", + "interval": "", + "legendFormat": "Transmit Total ({{device}})", + "refId": "F" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network Traffic", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{device}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 14 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(node_network_transmit_bytes_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", 
instance=~\"$instance\"}[$__rate_interval]) OR rate(windows_net_packets_sent_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) by (device)", + "interval": "", + "legendFormat": "Transmit Total ({{device}})", + "refId": "A" + }, + { + "expr": "sum(rate(node_network_receive_bytes_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval]) OR rate(windows_net_packets_received_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) by (device)", + "interval": "", + "legendFormat": "Receive Total ({{device}})", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "instance", + "query": "label_values({__name__=~\"node_exporter_build_info|windows_exporter_build_info\"}, instance)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Rancher / Node (Detail)", + 
"uid": "rancher-node-detail-1", + "version": 3 +} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/nodes/rancher-node.json b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/nodes/rancher-node.json new file mode 100755 index 000000000..7324c4164 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/nodes/rancher-node.json @@ -0,0 +1,776 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 28, + "links": [], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 - avg(irate({__name__=~\"node_cpu_seconds_total|windows_cpu_time_total\", instance=\"$instance\", mode=\"idle\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Total", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + 
"values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Load[5m]" + }, + "properties": [] + } + ] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 0 + }, + "hiddenSeries": false, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(node_load5{instance=~\"$instance\"} OR avg_over_time(windows_system_processor_queue_length{instance=~\"$instance\"}[5m]))", + "interval": "", + "legendFormat": "Load[5m]", + "refId": "A" + }, + { + "expr": "sum(node_load1{instance=~\"$instance\"} OR avg_over_time(windows_system_processor_queue_length{instance=~\"$instance\"}[1m]))", + "interval": "", + "legendFormat": "Load[1m]", + "refId": "B" + }, + { + "expr": "sum(node_load15{instance=~\"$instance\"} OR avg_over_time(windows_system_processor_queue_length{instance=~\"$instance\"}[15m]))", + "interval": "", + "legendFormat": "Load[15m]", + "refId": "C" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Load Average", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", 
+ "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 - sum(node_memory_MemAvailable_bytes{instance=~\"$instance\"} OR windows_os_physical_memory_free_bytes{instance=~\"$instance\"}) / sum(node_memory_MemTotal_bytes{instance=~\"$instance\"} OR windows_cs_physical_memory_bytes{instance=~\"$instance\"})", + "interval": "", + "legendFormat": "Total", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + 
"min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 7 + }, + "hiddenSeries": false, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 - (sum(node_filesystem_free_bytes{device!~\"rootfs|HarddiskVolume.+\", instance=~\"$instance\"} OR windows_logical_disk_free_bytes{volume!~\"(HarddiskVolume.+|[A-Z]:.+)\", instance=~\"$instance\"}) / sum(node_filesystem_size_bytes{device!~\"rootfs|HarddiskVolume.+\", instance=~\"$instance\"} OR windows_logical_disk_size_bytes{volume!~\"(HarddiskVolume.+|[A-Z]:.+)\", instance=~\"$instance\"}))", + "interval": "", + "legendFormat": "Total", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 7 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(node_disk_read_bytes_total{instance=~\"$instance\"}[$__rate_interval]) OR rate(windows_logical_disk_read_bytes_total{instance=~\"$instance\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Read", + "refId": "A" + }, + { + "expr": "sum(rate(node_disk_written_bytes_total{instance=~\"$instance\"}[$__rate_interval]) OR rate(windows_logical_disk_write_bytes_total{instance=~\"$instance\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Write", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 
8, + "x": 16, + "y": 7 + }, + "hiddenSeries": false, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(rate(node_network_receive_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_received_errors{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0))", + "interval": "", + "legendFormat": "Receive Errors", + "refId": "A" + }, + { + "expr": "(sum(rate(node_network_receive_packets_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_received_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0))", + "interval": "", + "legendFormat": "Receive Total", + "refId": "B" + }, + { + "expr": "(sum(rate(node_network_transmit_errs_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_outbound_errors{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0))", + "interval": "", + "legendFormat": "Transmit Errors", + "refId": "C" + }, + { + "expr": "(sum(rate(node_network_receive_drop_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0)) + 
(sum(rate(windows_net_packets_received_discarded{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0))", + "interval": "", + "legendFormat": "Receive Dropped", + "refId": "D" + }, + { + "expr": "(sum(rate(node_network_transmit_drop_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_outbound_discarded{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0))", + "interval": "", + "legendFormat": "Transmit Dropped", + "refId": "E" + }, + { + "expr": "(sum(rate(node_network_transmit_packets_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0)) + (sum(rate(windows_net_packets_sent_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval])) OR on() vector(0))", + "interval": "", + "legendFormat": "Transmit Total", + "refId": "F" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network Traffic", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 14 + }, + "hiddenSeries": false, + "id": 8, + 
"legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(node_network_transmit_bytes_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval]) OR rate(windows_net_packets_sent_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Transmit Total", + "refId": "A" + }, + { + "expr": "sum(rate(node_network_receive_bytes_total{device!~\"lo|veth.*|docker.*|flannel.*|cali.*|cbr.*\", instance=~\"$instance\"}[$__rate_interval]) OR rate(windows_net_packets_received_total{nic!~'.*isatap.*|.*VPN.*|.*Pseudo.*|.*tunneling.*', instance=~\"$instance\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Receive Total", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "instance", + "query": 
"label_values({__name__=~\"node_exporter_build_info|windows_exporter_build_info\"}, instance)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Rancher / Node", + "uid": "rancher-node-1", + "version": 3 +} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/pods/rancher-pod-containers.json b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/pods/rancher-pod-containers.json new file mode 100755 index 000000000..a625a552d --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/pods/rancher-pod-containers.json @@ -0,0 +1,620 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 28, + "iteration": 1618265214337, + "links": [], + "panels": [ + { + "aliasColors": { + "{{container}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + 
"steppedLine": false, + "targets": [ + { + "expr": "sum(rate(container_cpu_cfs_throttled_seconds_total{container!=\"POD\",namespace=~\"$namespace\",pod=~\"$pod\", container!=\"\"}[$__rate_interval])) by (container)", + "interval": "", + "legendFormat": "CFS throttled ({{container}})", + "refId": "A" + }, + { + "expr": "sum(rate(container_cpu_system_seconds_total{container!=\"POD\",namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container) OR sum(rate(windows_container_cpu_usage_seconds_kernelmode{container!=\"POD\",namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container)", + "interval": "", + "legendFormat": "System ({{container}})", + "refId": "B" + }, + { + "expr": "sum(rate(container_cpu_usage_seconds_total{container!=\"POD\",namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container) OR sum(rate(windows_container_cpu_usage_seconds_total{container!=\"POD\",namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container)", + "interval": "", + "legendFormat": "Total ({{container}})", + "refId": "C" + }, + { + "expr": "sum(rate(container_cpu_user_seconds_total{container!=\"POD\",namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container) OR sum(rate(windows_container_cpu_usage_seconds_usermode{container!=\"POD\",namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container)", + "interval": "", + "legendFormat": "User ({{container}})", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "cpu", + "label": null, + "logBase": 1, + "max": null, + "min": 
null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{container}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{container!=\"POD\",namespace=~\"$namespace\",pod=~\"$pod\", container!=\"\"} OR windows_container_memory_usage_commit_bytes{container!=\"POD\",namespace=~\"$namespace\",pod=~\"$pod\", container!=\"\"}) by (container)", + "interval": "", + "legendFormat": "({{container}})", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{container}}": "#3797d5" + }, + 
"bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "hiddenSeries": false, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(container_network_receive_packets_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container) OR sum(rate(windows_container_network_receive_packets_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container)", + "interval": "", + "legendFormat": "Receive Total ({{container}})", + "refId": "A" + }, + { + "expr": "sum(rate(container_network_transmit_packets_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container) OR sum(rate(windows_container_network_transmit_packets_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container)", + "interval": "", + "legendFormat": "Transmit Total ({{container}})", + "refId": "B" + }, + { + "expr": "sum(rate(container_network_receive_packets_dropped_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container) OR sum(rate(windows_container_network_receive_packets_dropped_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container)", + "interval": "", + "legendFormat": "Receive Dropped ({{container}})", + "refId": "C" + }, + { + "expr": 
"sum(rate(container_network_receive_errors_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container)", + "interval": "", + "legendFormat": "Receive Errors ({{container}})", + "refId": "D" + }, + { + "expr": "sum(rate(container_network_transmit_errors_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container)", + "interval": "", + "legendFormat": "Transmit Errors ({{container}})", + "refId": "E" + }, + { + "expr": "sum(rate(container_network_transmit_packets_dropped_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container) OR sum(rate(windows_container_network_transmit_packets_dropped_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container)", + "interval": "", + "legendFormat": "Transmit Dropped ({{container}})", + "refId": "F" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network Traffic", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{container}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 7 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": 
false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(container_network_receive_bytes_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container) OR sum(rate(windows_container_network_receive_bytes_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container)", + "interval": "", + "legendFormat": "Receive Total ({{container}})", + "refId": "A" + }, + { + "expr": "sum(rate(container_network_transmit_bytes_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container) OR sum(rate(windows_container_network_transmit_bytes_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container)", + "interval": "", + "legendFormat": "Transmit Total ({{container}})", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{container}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + 
"y": 7 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(container_fs_writes_bytes_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container)", + "interval": "", + "legendFormat": "Write ({{container}})", + "refId": "A" + }, + { + "expr": "sum(rate(container_fs_reads_bytes_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) by (container)", + "interval": "", + "legendFormat": "Read ({{container}})", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": false, + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "namespace", + "query": "label_values({__name__=~\"container_.*|windows_container_.*\", namespace!=\"\"}, namespace)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": 
false + }, + { + "allValue": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "pod", + "query": "label_values({__name__=~\"container_.*|windows_container_.*\", namespace=\"$namespace\", pod!=\"\"}, pod)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Rancher / Pod (Containers)", + "uid": "rancher-pod-containers-1", + "version": 8 +} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/pods/rancher-pod.json b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/pods/rancher-pod.json new file mode 100755 index 000000000..914a712e8 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/pods/rancher-pod.json @@ -0,0 +1,620 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 28, + "iteration": 1618265214337, + "links": [], + "panels": [ + { + "aliasColors": { + "": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + 
"pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(container_cpu_cfs_throttled_seconds_total{container!=\"POD\",namespace=~\"$namespace\",pod=~\"$pod\", container!=\"\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "CFS throttled", + "refId": "A" + }, + { + "expr": "sum(rate(container_cpu_system_seconds_total{container!=\"POD\",namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) OR sum(rate(windows_container_cpu_usage_seconds_kernelmode{container!=\"POD\",namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "System", + "refId": "B" + }, + { + "expr": "sum(rate(container_cpu_usage_seconds_total{container!=\"POD\",namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) OR sum(rate(windows_container_cpu_usage_seconds_total{container!=\"POD\",namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Total", + "refId": "C" + }, + { + "expr": "sum(rate(container_cpu_user_seconds_total{container!=\"POD\",namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) OR sum(rate(windows_container_cpu_usage_seconds_usermode{container!=\"POD\",namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "User", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "cpu", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": 
"short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{container!=\"POD\",namespace=~\"$namespace\",pod=~\"$pod\", container!=\"\"} OR windows_container_memory_usage_commit_bytes{container!=\"POD\",namespace=~\"$namespace\",pod=~\"$pod\", container!=\"\"})", + "interval": "", + "legendFormat": "Total", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + 
"defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "hiddenSeries": false, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(container_network_receive_packets_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) OR sum(rate(windows_container_network_receive_packets_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Receive Total", + "refId": "A" + }, + { + "expr": "sum(rate(container_network_transmit_packets_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) OR sum(rate(windows_container_network_transmit_packets_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Transmit Total", + "refId": "B" + }, + { + "expr": "sum(rate(container_network_receive_packets_dropped_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) OR sum(rate(windows_container_network_receive_packets_dropped_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Receive Dropped", + "refId": "C" + }, + { + "expr": "sum(rate(container_network_receive_errors_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Receive Errors", + "refId": "D" + }, + { + "expr": 
"sum(rate(container_network_transmit_errors_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Transmit Errors", + "refId": "E" + }, + { + "expr": "sum(rate(container_network_transmit_packets_dropped_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) OR sum(rate(windows_container_network_transmit_packets_dropped_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Transmit Dropped", + "refId": "F" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network Traffic", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 7 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"sum(rate(container_network_receive_bytes_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) OR sum(rate(windows_container_network_receive_bytes_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Receive Total", + "refId": "A" + }, + { + "expr": "sum(rate(container_network_transmit_bytes_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval])) OR sum(rate(windows_container_network_transmit_bytes_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Transmit Total", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 7 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + 
"stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(container_fs_writes_bytes_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Write", + "refId": "A" + }, + { + "expr": "sum(rate(container_fs_reads_bytes_total{namespace=~\"$namespace\",pod=~\"$pod\",container!=\"\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "Read", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": false, + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "namespace", + "query": "label_values({__name__=~\"container_.*|windows_container_.*\", namespace!=\"\"}, namespace)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "pod", + "query": "label_values({__name__=~\"container_.*|windows_container_.*\", namespace=\"$namespace\", pod!=\"\"}, pod)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ 
+ "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Rancher / Pod", + "uid": "rancher-pod-1", + "version": 8 +} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/workloads/rancher-workload-pods.json b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/workloads/rancher-workload-pods.json new file mode 100755 index 000000000..b6471576b --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/workloads/rancher-workload-pods.json @@ -0,0 +1,636 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 28, + "iteration": 1618265214337, + "links": [], + "panels": [ + { + "aliasColors": { + "{{pod}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(rate(container_cpu_cfs_throttled_seconds_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"}", + "interval": "", + "legendFormat": "CFS throttled 
({{pod}})", + "refId": "A" + }, + { + "expr": "(sum(rate(container_cpu_system_seconds_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod) OR sum(rate(windows_container_cpu_usage_seconds_kernelmode{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"}", + "interval": "", + "legendFormat": "System ({{pod}})", + "refId": "B" + }, + { + "expr": "(sum(rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod) OR sum(rate(windows_container_cpu_usage_seconds_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"}", + "interval": "", + "legendFormat": "Total ({{pod}})", + "refId": "C" + }, + { + "expr": "(sum(rate(container_cpu_user_seconds_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod) OR sum(rate(windows_container_cpu_usage_seconds_usermode{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"}", + "interval": "", + "legendFormat": "User ({{pod}})", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "cpu", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{pod}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + 
"dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(container_memory_working_set_bytes{namespace=~\"$namespace\"} OR windows_container_memory_usage_commit_bytes{namespace=~\"$namespace\"}) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"}", + "interval": "", + "legendFormat": "({{pod}})", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{pod}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "hiddenSeries": false, + "id": 7, + "legend": { + 
"avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(rate(container_network_receive_packets_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod) OR sum(rate(windows_container_network_receive_packets_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"}", + "interval": "", + "legendFormat": "Receive Total ({{pod}})", + "refId": "A" + }, + { + "expr": "(sum(rate(container_network_transmit_packets_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod) OR sum(rate(windows_container_network_transmit_packets_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"}", + "interval": "", + "legendFormat": "Transmit Total ({{pod}})", + "refId": "B" + }, + { + "expr": "(sum(rate(container_network_receive_packets_dropped_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod) OR sum(rate(windows_container_network_receive_packets_dropped_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"}", + "interval": "", + "legendFormat": "Receive Dropped ({{pod}})", + "refId": "C" + }, + { + "expr": "(sum(rate(container_network_receive_errors_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"}", + "interval": "", + "legendFormat": "Receive 
Errors ({{pod}})", + "refId": "D" + }, + { + "expr": "(sum(rate(container_network_transmit_errors_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"}", + "interval": "", + "legendFormat": "Transmit Errors ({{pod}})", + "refId": "E" + }, + { + "expr": "(sum(rate(container_network_transmit_packets_dropped_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod) OR sum(rate(windows_container_network_transmit_packets_dropped_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"}", + "interval": "", + "legendFormat": "Transmit Dropped ({{pod}})", + "refId": "F" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network Traffic", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{pod}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 7 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + 
"pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(rate(container_network_receive_bytes_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod) OR sum(rate(windows_container_network_receive_bytes_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"}", + "interval": "", + "legendFormat": "Receive Total ({{pod}})", + "refId": "A" + }, + { + "expr": "(sum(rate(container_network_transmit_bytes_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod) OR sum(rate(windows_container_network_transmit_bytes_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"}", + "interval": "", + "legendFormat": "Transmit Total ({{pod}})", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{pod}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 7 + }, + "hiddenSeries": false, + 
"id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(rate(container_fs_writes_bytes_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"}", + "interval": "", + "legendFormat": "Write ({{pod}})", + "refId": "A" + }, + { + "expr": "(sum(rate(container_fs_reads_bytes_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"}", + "interval": "", + "legendFormat": "Read ({{pod}})", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": false, + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "namespace", + "query": "query_result(kube_pod_info{namespace!=\"\"} * on(pod) group_right(namespace, created_by_kind, created_by_name) 
count({__name__=~\"container_.*|windows_container_.*\", pod!=\"\"}) by (pod))", + "refresh": 2, + "regex": "/.*namespace=\"([^\"]*)\"/", + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "kind", + "query": "query_result(kube_pod_info{namespace=\"$namespace\", created_by_kind!=\"\"} * on(pod) group_right(namespace, created_by_kind, created_by_name) count({__name__=~\"container_.*|windows_container_.*\", pod!=\"\"}) by (pod))", + "refresh": 2, + "regex": "/.*created_by_kind=\"([^\"]*)\"/", + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "workload", + "query": "query_result(kube_pod_info{namespace=\"$namespace\", created_by_kind=\"$kind\", created_by_name!=\"\"} * on(pod) group_right(namespace, created_by_kind, created_by_name) count({__name__=~\"container_.*|windows_container_.*\", pod!=\"\"}) by (pod))", + "refresh": 2, + "regex": "/.*created_by_name=\"([^\"]*)\"/", + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Rancher / Workload (Pods)", + "uid": "rancher-workload-pods-1", + "version": 8 +} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/workloads/rancher-workload.json b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/workloads/rancher-workload.json new file mode 100755 index 000000000..ed352a34b --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/files/rancher/workloads/rancher-workload.json @@ -0,0 +1,636 @@ +{ + 
"annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 28, + "iteration": 1618265214337, + "links": [], + "panels": [ + { + "aliasColors": { + "{{pod}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum((sum(rate(container_cpu_cfs_throttled_seconds_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"})", + "interval": "", + "legendFormat": "CFS throttled", + "refId": "A" + }, + { + "expr": "sum((sum(rate(container_cpu_system_seconds_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod) OR sum(rate(windows_container_cpu_usage_seconds_kernelmode{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"})", + "interval": "", + "legendFormat": "System", + "refId": "B" + }, + { + "expr": "sum((sum(rate(container_cpu_usage_seconds_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod) OR 
sum(rate(windows_container_cpu_usage_seconds_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"})", + "interval": "", + "legendFormat": "Total", + "refId": "C" + }, + { + "expr": "sum((sum(rate(container_cpu_user_seconds_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod) OR sum(rate(windows_container_cpu_usage_seconds_usermode{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"})", + "interval": "", + "legendFormat": "User", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "CPU Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "cpu", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{pod}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + 
"spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum((sum(container_memory_working_set_bytes{namespace=~\"$namespace\"} OR windows_container_memory_usage_commit_bytes{namespace=~\"$namespace\"}) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"})", + "interval": "", + "legendFormat": "Total", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Memory Utilization", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{pod}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "hiddenSeries": false, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum((sum(rate(container_network_receive_packets_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod) OR 
sum(rate(windows_container_network_receive_packets_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"})", + "interval": "", + "legendFormat": "Receive Total", + "refId": "A" + }, + { + "expr": "sum((sum(rate(container_network_transmit_packets_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod) OR sum(rate(windows_container_network_transmit_packets_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"})", + "interval": "", + "legendFormat": "Transmit Total", + "refId": "B" + }, + { + "expr": "sum((sum(rate(container_network_receive_packets_dropped_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod) OR sum(rate(windows_container_network_receive_packets_dropped_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"})", + "interval": "", + "legendFormat": "Receive Dropped", + "refId": "C" + }, + { + "expr": "sum((sum(rate(container_network_receive_errors_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"})", + "interval": "", + "legendFormat": "Receive Errors", + "refId": "D" + }, + { + "expr": "sum((sum(rate(container_network_transmit_errors_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"})", + "interval": "", + "legendFormat": "Transmit Errors", + "refId": "E" + }, + { + "expr": "sum((sum(rate(container_network_transmit_packets_dropped_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod) OR 
sum(rate(windows_container_network_transmit_packets_dropped_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"})", + "interval": "", + "legendFormat": "Transmit Dropped", + "refId": "F" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network Traffic", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{pod}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 7 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum((sum(rate(container_network_receive_bytes_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod) OR sum(rate(windows_container_network_receive_bytes_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", 
created_by_kind=\"$kind\", created_by_name=\"$workload\"})", + "interval": "", + "legendFormat": "Receive Total", + "refId": "A" + }, + { + "expr": "sum((sum(rate(container_network_transmit_bytes_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod) OR sum(rate(windows_container_network_transmit_bytes_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"})", + "interval": "", + "legendFormat": "Transmit Total", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "{{pod}}": "#3797d5" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": null, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 7 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.5", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"sum((sum(rate(container_fs_writes_bytes_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"})", + "interval": "", + "legendFormat": "Write", + "refId": "A" + }, + { + "expr": "sum((sum(rate(container_fs_reads_bytes_total{namespace=~\"$namespace\"}[$__rate_interval])) by (pod)) * on(pod) kube_pod_info{namespace=~\"$namespace\", created_by_kind=\"$kind\", created_by_name=\"$workload\"})", + "interval": "", + "legendFormat": "Read", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 1, + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": false, + "schemaVersion": 26, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "allValue": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "namespace", + "query": "query_result(kube_pod_info{namespace!=\"\"} * on(pod) group_right(namespace, created_by_kind, created_by_name) count({__name__=~\"container_.*|windows_container_.*\", pod!=\"\"}) by (pod))", + "refresh": 2, + "regex": "/.*namespace=\"([^\"]*)\"/", + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "kind", + "query": "query_result(kube_pod_info{namespace=\"$namespace\", created_by_kind!=\"\"} * on(pod) 
group_right(namespace, created_by_kind, created_by_name) count({__name__=~\"container_.*|windows_container_.*\", pod!=\"\"}) by (pod))", + "refresh": 2, + "regex": "/.*created_by_kind=\"([^\"]*)\"/", + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "workload", + "query": "query_result(kube_pod_info{namespace=\"$namespace\", created_by_kind=\"$kind\", created_by_name!=\"\"} * on(pod) group_right(namespace, created_by_kind, created_by_name) count({__name__=~\"container_.*|windows_container_.*\", pod!=\"\"}) by (pod))", + "refresh": 2, + "regex": "/.*created_by_name=\"([^\"]*)\"/", + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "", + "title": "Rancher / Workload", + "uid": "rancher-workload-1", + "version": 8 +} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/NOTES.txt b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/NOTES.txt new file mode 100755 index 000000000..371f3ae39 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/NOTES.txt @@ -0,0 +1,4 @@ +{{ $.Chart.Name }} has been installed. Check its status by running: + kubectl --namespace {{ template "kube-prometheus-stack.namespace" . }} get pods -l "release={{ $.Release.Name }}" + +Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator. 
diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/_helpers.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/_helpers.tpl new file mode 100755 index 000000000..123cbad6d --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/_helpers.tpl @@ -0,0 +1,200 @@ +# Rancher +{{- define "system_default_registry" -}} +{{- if .Values.global.cattle.systemDefaultRegistry -}} +{{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} +{{- end -}} +{{- end -}} + +{{/* +https://github.com/helm/helm/issues/4535#issuecomment-477778391 +Usage: {{ include "call-nested" (list . "SUBCHART_NAME" "TEMPLATE") }} +e.g. {{ include "call-nested" (list . "grafana" "grafana.fullname") }} +*/}} +{{- define "call-nested" }} +{{- $dot := index . 0 }} +{{- $subchart := index . 1 | splitList "." }} +{{- $template := index . 2 }} +{{- $values := $dot.Values }} +{{- range $subchart }} +{{- $values = index $values . }} +{{- end }} +{{- include $template (dict "Chart" (dict "Name" (last $subchart)) "Values" $values "Release" $dot.Release "Capabilities" $dot.Capabilities) }} +{{- end }} + +# Special Exporters +{{- define "exporter.kubeEtcd.enabled" -}} +{{- if or .Values.kubeEtcd.enabled .Values.rkeEtcd.enabled .Values.kubeAdmEtcd.enabled .Values.rke2Etcd.enabled -}} +"true" +{{- end -}} +{{- end }} + +{{- define "exporter.kubeControllerManager.enabled" -}} +{{- if or .Values.kubeControllerManager.enabled .Values.rkeControllerManager.enabled .Values.k3sServer.enabled .Values.kubeAdmControllerManager.enabled .Values.rke2ControllerManager.enabled -}} +"true" +{{- end -}} +{{- end }} + +{{- define "exporter.kubeScheduler.enabled" -}} +{{- if or .Values.kubeScheduler.enabled .Values.rkeScheduler.enabled .Values.k3sServer.enabled .Values.kubeAdmScheduler.enabled .Values.rke2Scheduler.enabled -}} +"true" +{{- end -}} +{{- end }} + +{{- define "exporter.kubeProxy.enabled" -}} +{{- if or .Values.kubeProxy.enabled 
.Values.rkeProxy.enabled .Values.k3sServer.enabled .Values.kubeAdmProxy.enabled .Values.rke2Proxy.enabled -}} +"true" +{{- end -}} +{{- end }} + +{{- define "exporter.kubeControllerManager.jobName" -}} +{{- if .Values.k3sServer.enabled -}} +k3s-server +{{- else -}} +kube-controller-manager +{{- end -}} +{{- end }} + +{{- define "exporter.kubeScheduler.jobName" -}} +{{- if .Values.k3sServer.enabled -}} +k3s-server +{{- else -}} +kube-scheduler +{{- end -}} +{{- end }} + +{{- define "exporter.kubeProxy.jobName" -}} +{{- if .Values.k3sServer.enabled -}} +k3s-server +{{- else -}} +kube-proxy +{{- end -}} +{{- end }} + +{{- define "kubelet.serviceMonitor.resourcePath" -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if not (eq .Values.kubelet.serviceMonitor.resourcePath "/metrics/resource/v1alpha1") -}} +{{ .Values.kubelet.serviceMonitor.resourcePath }} +{{- else if semverCompare ">=1.20.0-0" $kubeTargetVersion -}} +/metrics/resource +{{- else -}} +/metrics/resource/v1alpha1 +{{- end -}} +{{- end }} + +# Windows Support + +{{/* +Windows cluster will add default taint for linux nodes, +add below linux tolerations to workloads could be scheduled to those linux nodes +*/}} + +{{- define "linux-node-tolerations" -}} +- key: "cattle.io/os" + value: "linux" + effect: "NoSchedule" + operator: "Equal" +{{- end -}} + +{{- define "linux-node-selector" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +beta.kubernetes.io/os: linux +{{- else -}} +kubernetes.io/os: linux +{{- end -}} +{{- end -}} + +# Prometheus Operator + +{{/* vim: set filetype=mustache: */}} +{{/* Expand the name of the chart. This is suffixed with -alertmanager, which means subtract 13 from longest 63 available */}} +{{- define "kube-prometheus-stack.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 50 | trimSuffix "-" -}} +{{- end }} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +The components in this chart create additional resources that expand the longest created name strings. +The longest name that gets created adds and extra 37 characters, so truncation should be 63-35=26. +*/}} +{{- define "kube-prometheus-stack.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 26 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 26 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 26 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* Fullname suffixed with operator */}} +{{- define "kube-prometheus-stack.operator.fullname" -}} +{{- printf "%s-operator" (include "kube-prometheus-stack.fullname" .) -}} +{{- end }} + +{{/* Fullname suffixed with prometheus */}} +{{- define "kube-prometheus-stack.prometheus.fullname" -}} +{{- printf "%s-prometheus" (include "kube-prometheus-stack.fullname" .) -}} +{{- end }} + +{{/* Fullname suffixed with alertmanager */}} +{{- define "kube-prometheus-stack.alertmanager.fullname" -}} +{{- printf "%s-alertmanager" (include "kube-prometheus-stack.fullname" .) -}} +{{- end }} + +{{/* Create chart name and version as used by the chart label. */}} +{{- define "kube-prometheus-stack.chartref" -}} +{{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}} +{{- end }} + +{{/* Generate basic labels */}} +{{- define "kube-prometheus-stack.labels" }} +chart: {{ template "kube-prometheus-stack.chartref" . 
}} +release: {{ $.Release.Name | quote }} +heritage: {{ $.Release.Service | quote }} +{{- if .Values.commonLabels}} +{{ toYaml .Values.commonLabels }} +{{- end }} +{{- end }} + +{{/* Create the name of kube-prometheus-stack service account to use */}} +{{- define "kube-prometheus-stack.operator.serviceAccountName" -}} +{{- if .Values.prometheusOperator.serviceAccount.create -}} + {{ default (include "kube-prometheus-stack.operator.fullname" .) .Values.prometheusOperator.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.prometheusOperator.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* Create the name of prometheus service account to use */}} +{{- define "kube-prometheus-stack.prometheus.serviceAccountName" -}} +{{- if .Values.prometheus.serviceAccount.create -}} + {{ default (include "kube-prometheus-stack.prometheus.fullname" .) .Values.prometheus.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.prometheus.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* Create the name of alertmanager service account to use */}} +{{- define "kube-prometheus-stack.alertmanager.serviceAccountName" -}} +{{- if .Values.alertmanager.serviceAccount.create -}} + {{ default (include "kube-prometheus-stack.alertmanager.fullname" .) 
.Values.alertmanager.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.alertmanager.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "kube-prometheus-stack.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/alertmanager.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/alertmanager.yaml new file mode 100755 index 000000000..8967c86ff --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/alertmanager.yaml @@ -0,0 +1,147 @@ +{{- if .Values.alertmanager.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: Alertmanager +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: +{{- if .Values.alertmanager.alertmanagerSpec.image }} + image: {{ template "system_default_registry" . }}{{ .Values.alertmanager.alertmanagerSpec.image.repository }}:{{ .Values.alertmanager.alertmanagerSpec.image.tag }} + version: {{ .Values.alertmanager.alertmanagerSpec.image.tag }} + {{- if .Values.alertmanager.alertmanagerSpec.image.sha }} + sha: {{ .Values.alertmanager.alertmanagerSpec.image.sha }} + {{- end }} +{{- end }} + replicas: {{ .Values.alertmanager.alertmanagerSpec.replicas }} + listenLocal: {{ .Values.alertmanager.alertmanagerSpec.listenLocal }} + serviceAccountName: {{ template "kube-prometheus-stack.alertmanager.serviceAccountName" . 
}} +{{- if .Values.alertmanager.alertmanagerSpec.externalUrl }} + externalUrl: "{{ tpl .Values.alertmanager.alertmanagerSpec.externalUrl . }}" +{{- else if and .Values.alertmanager.ingress.enabled .Values.alertmanager.ingress.hosts }} + externalUrl: "http://{{ tpl (index .Values.alertmanager.ingress.hosts 0) . }}{{ .Values.alertmanager.alertmanagerSpec.routePrefix }}" +{{- else if not (or (kindIs "invalid" .Values.global.cattle.url) (kindIs "invalid" .Values.global.cattle.clusterId)) }} + externalUrl: "{{ .Values.global.cattle.url }}/k8s/clusters/{{ .Values.global.cattle.clusterId }}/api/v1/namespaces/{{ .Values.namespaceOverride }}/services/http:{{ template "kube-prometheus-stack.fullname" . }}-alertmanager:{{ .Values.alertmanager.service.port }}/proxy" +{{- else }} + externalUrl: http://{{ template "kube-prometheus-stack.fullname" . }}-alertmanager.{{ template "kube-prometheus-stack.namespace" . }}:{{ .Values.alertmanager.service.port }} +{{- end }} + nodeSelector: {{ include "linux-node-selector" . 
| nindent 4 }} +{{- if .Values.alertmanager.alertmanagerSpec.nodeSelector }} +{{ toYaml .Values.alertmanager.alertmanagerSpec.nodeSelector | indent 4 }} +{{- end }} + paused: {{ .Values.alertmanager.alertmanagerSpec.paused }} + logFormat: {{ .Values.alertmanager.alertmanagerSpec.logFormat | quote }} + logLevel: {{ .Values.alertmanager.alertmanagerSpec.logLevel | quote }} + retention: {{ .Values.alertmanager.alertmanagerSpec.retention | quote }} +{{- if .Values.alertmanager.alertmanagerSpec.secrets }} + secrets: +{{ toYaml .Values.alertmanager.alertmanagerSpec.secrets | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.alertmanagerSpec.configSecret }} + configSecret: {{ .Values.alertmanager.alertmanagerSpec.configSecret }} +{{- end }} +{{- if .Values.alertmanager.alertmanagerSpec.configMaps }} + configMaps: +{{ toYaml .Values.alertmanager.alertmanagerSpec.configMaps | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.alertmanagerSpec.alertmanagerConfigSelector }} + alertmanagerConfigSelector: +{{ toYaml .Values.alertmanager.alertmanagerSpec.alertmanagerConfigSelector | indent 4}} +{{ else }} + alertmanagerConfigSelector: {} +{{- end }} +{{- if .Values.alertmanager.alertmanagerSpec.alertmanagerConfigNamespaceSelector }} + alertmanagerConfigNamespaceSelector: +{{ toYaml .Values.alertmanager.alertmanagerSpec.alertmanagerConfigNamespaceSelector | indent 4}} +{{ else }} + alertmanagerConfigNamespaceSelector: {} +{{- end }} +{{- if .Values.alertmanager.alertmanagerSpec.resources }} + resources: +{{ toYaml .Values.alertmanager.alertmanagerSpec.resources | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.alertmanagerSpec.routePrefix }} + routePrefix: "{{ .Values.alertmanager.alertmanagerSpec.routePrefix }}" +{{- end }} +{{- if .Values.alertmanager.alertmanagerSpec.securityContext }} + securityContext: +{{ toYaml .Values.alertmanager.alertmanagerSpec.securityContext | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.alertmanagerSpec.storage }} + storage: +{{ 
toYaml .Values.alertmanager.alertmanagerSpec.storage | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.alertmanagerSpec.podMetadata }} + podMetadata: +{{ toYaml .Values.alertmanager.alertmanagerSpec.podMetadata | indent 4 }} +{{- end }} +{{- if or .Values.alertmanager.alertmanagerSpec.podAntiAffinity .Values.alertmanager.alertmanagerSpec.affinity }} + affinity: +{{- if .Values.alertmanager.alertmanagerSpec.affinity }} +{{ toYaml .Values.alertmanager.alertmanagerSpec.affinity | indent 4 }} +{{- end }} +{{- if eq .Values.alertmanager.alertmanagerSpec.podAntiAffinity "hard" }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: {{ .Values.alertmanager.alertmanagerSpec.podAntiAffinityTopologyKey }} + labelSelector: + matchExpressions: + - {key: app, operator: In, values: [alertmanager]} + - {key: prometheus, operator: In, values: [{{ template "kube-prometheus-stack.fullname" . }}-alertmanager]} +{{- else if eq .Values.alertmanager.alertmanagerSpec.podAntiAffinity "soft" }} + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: {{ .Values.alertmanager.alertmanagerSpec.podAntiAffinityTopologyKey }} + labelSelector: + matchExpressions: + - {key: app, operator: In, values: [alertmanager]} + - {key: prometheus, operator: In, values: [{{ template "kube-prometheus-stack.fullname" . }}-alertmanager]} +{{- end }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 4 }} +{{- if .Values.alertmanager.alertmanagerSpec.tolerations }} +{{ toYaml .Values.alertmanager.alertmanagerSpec.tolerations | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.alertmanagerSpec.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.alertmanager.alertmanagerSpec.topologySpreadConstraints | indent 4 }} +{{- end }} +{{- if .Values.global.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.global.imagePullSecrets | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.alertmanagerSpec.containers }} + containers: +{{ toYaml .Values.alertmanager.alertmanagerSpec.containers | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.alertmanagerSpec.initContainers }} + initContainers: +{{ toYaml .Values.alertmanager.alertmanagerSpec.initContainers | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.alertmanagerSpec.priorityClassName }} + priorityClassName: {{.Values.alertmanager.alertmanagerSpec.priorityClassName }} +{{- end }} +{{- if .Values.alertmanager.alertmanagerSpec.additionalPeers }} + additionalPeers: +{{ toYaml .Values.alertmanager.alertmanagerSpec.additionalPeers | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.alertmanagerSpec.volumes }} + volumes: +{{ toYaml .Values.alertmanager.alertmanagerSpec.volumes | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.alertmanagerSpec.volumeMounts }} + volumeMounts: +{{ toYaml .Values.alertmanager.alertmanagerSpec.volumeMounts | indent 4 }} +{{- end }} + portName: {{ .Values.alertmanager.alertmanagerSpec.portName }} +{{- if .Values.alertmanager.alertmanagerSpec.clusterAdvertiseAddress }} + clusterAdvertiseAddress: {{ .Values.alertmanager.alertmanagerSpec.clusterAdvertiseAddress }} +{{- end }} +{{- if .Values.alertmanager.alertmanagerSpec.forceEnableClusterMode }} + forceEnableClusterMode: {{ .Values.alertmanager.alertmanagerSpec.forceEnableClusterMode }} +{{- end }} +{{- end }} diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/cleanupSecret.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/cleanupSecret.yaml new file mode 100755 index 000000000..908955697 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/cleanupSecret.yaml @@ -0,0 +1,88 @@ +{{- if and (.Values.alertmanager.enabled) (not .Values.alertmanager.alertmanagerSpec.useExistingSecret) (.Values.alertmanager.secret.cleanupOnUninstall) }} +apiVersion: batch/v1 +kind: Job +metadata: + name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-post-delete + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: +{{ include "kube-prometheus-stack.labels" . | indent 4 }} + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager + annotations: + "helm.sh/hook": post-delete + "helm.sh/hook-delete-policy": hook-succeeded, hook-failed + "helm.sh/hook-weight": "5" +spec: + template: + metadata: + name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-post-delete + labels: {{ include "kube-prometheus-stack.labels" . | nindent 8 }} + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager + spec: + serviceAccountName: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-post-delete +{{- if .Values.alertmanager.secret.securityContext }} + securityContext: +{{ toYaml .Values.alertmanager.secret.securityContext | indent 8 }} +{{- end }} + containers: + - name: delete-secret + image: {{ template "system_default_registry" . }}{{ .Values.alertmanager.secret.image.repository }}:{{ .Values.alertmanager.secret.image.tag }} + imagePullPolicy: {{ .Values.alertmanager.secret.image.pullPolicy }} + command: + - /bin/sh + - -c + - > + if kubectl get secret -n {{ template "kube-prometheus-stack.namespace" . }} alertmanager-{{ template "kube-prometheus-stack.fullname" . 
}}-alertmanager > /dev/null 2>&1; then + kubectl delete secret -n {{ template "kube-prometheus-stack.namespace" . }} alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-alertmanager + fi; + restartPolicy: OnFailure + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-post-delete + labels: + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager + annotations: + "helm.sh/hook": post-delete + "helm.sh/hook-delete-policy": hook-succeeded, hook-failed + "helm.sh/hook-weight": "3" +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: ['get', 'delete'] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-post-delete + labels: + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager + annotations: + "helm.sh/hook": post-delete + "helm.sh/hook-delete-policy": hook-succeeded, hook-failed + "helm.sh/hook-weight": "3" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-post-delete +subjects: +- kind: ServiceAccount + name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-post-delete + namespace: {{ template "kube-prometheus-stack.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-post-delete + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . 
}}-alertmanager + annotations: + "helm.sh/hook": post-delete + "helm.sh/hook-delete-policy": hook-succeeded, hook-failed + "helm.sh/hook-weight": "3" +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/ingress.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/ingress.yaml new file mode 100755 index 000000000..50fab1455 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/ingress.yaml @@ -0,0 +1,65 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled }} +{{- $pathType := .Values.alertmanager.ingress.pathType | default "" }} +{{- $serviceName := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "alertmanager" }} +{{- $servicePort := .Values.alertmanager.service.port -}} +{{- $routePrefix := list .Values.alertmanager.alertmanagerSpec.routePrefix }} +{{- $paths := .Values.alertmanager.ingress.paths | default $routePrefix -}} +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} +apiVersion: networking.k8s.io/v1beta1 +{{ else }} +apiVersion: extensions/v1beta1 +{{ end -}} +kind: Ingress +metadata: + name: {{ $serviceName }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} +{{- if .Values.alertmanager.ingress.annotations }} + annotations: +{{ toYaml .Values.alertmanager.ingress.annotations | indent 4 }} +{{- end }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager +{{- if .Values.alertmanager.ingress.labels }} +{{ toYaml .Values.alertmanager.ingress.labels | indent 4 }} +{{- end }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +spec: + {{- if or (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1") }} + {{- if .Values.alertmanager.ingress.ingressClassName }} + ingressClassName: {{ .Values.alertmanager.ingress.ingressClassName }} + {{- end }} + {{- end }} + rules: + {{- if .Values.alertmanager.ingress.hosts }} + {{- range $host := .Values.alertmanager.ingress.hosts }} + - host: {{ tpl $host $ }} + http: + paths: + {{- range $p := $paths }} + - path: {{ tpl $p $ }} + {{- if $pathType }} + pathType: {{ $pathType }} + {{- end }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} + {{- end -}} + {{- else }} + - http: + paths: + {{- range $p := $paths }} + - path: {{ tpl $p $ }} + {{- if $pathType }} + pathType: {{ $pathType }} + {{- end }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} + {{- end -}} + {{- if .Values.alertmanager.ingress.tls }} + tls: +{{ tpl (toYaml .Values.alertmanager.ingress.tls | indent 4) . 
}} + {{- end -}} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/ingressperreplica.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/ingressperreplica.yaml new file mode 100755 index 000000000..3d673b2c8 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/ingressperreplica.yaml @@ -0,0 +1,62 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.servicePerReplica.enabled .Values.alertmanager.ingressPerReplica.enabled }} +{{- $pathType := .Values.alertmanager.ingressPerReplica.pathType | default "" }} +{{- $count := .Values.alertmanager.alertmanagerSpec.replicas | int -}} +{{- $servicePort := .Values.alertmanager.service.port -}} +{{- $ingressValues := .Values.alertmanager.ingressPerReplica -}} +apiVersion: v1 +kind: List +metadata: + name: {{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-ingressperreplica + namespace: {{ template "kube-prometheus-stack.namespace" . 
}} +items: +{{ range $i, $e := until $count }} + - kind: Ingress + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} + apiVersion: networking.k8s.io/v1beta1 + {{ else }} + apiVersion: extensions/v1beta1 + {{ end -}} + metadata: + name: {{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-{{ $i }} + namespace: {{ template "kube-prometheus-stack.namespace" $ }} + labels: + app: {{ include "kube-prometheus-stack.name" $ }}-alertmanager +{{ include "kube-prometheus-stack.labels" $ | indent 8 }} + {{- if $ingressValues.labels }} +{{ toYaml $ingressValues.labels | indent 8 }} + {{- end }} + {{- if $ingressValues.annotations }} + annotations: +{{ toYaml $ingressValues.annotations | indent 8 }} + {{- end }} + spec: + {{- if or ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") ($.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1") }} + {{- if $ingressValues.ingressClassName }} + ingressClassName: {{ $ingressValues.ingressClassName }} + {{- end }} + {{- end }} + rules: + - host: {{ $ingressValues.hostPrefix }}-{{ $i }}.{{ $ingressValues.hostDomain }} + http: + paths: + {{- range $p := $ingressValues.paths }} + - path: {{ tpl $p $ }} + {{- if $pathType }} + pathType: {{ $pathType }} + {{- end }} + backend: + serviceName: {{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-{{ $i }} + servicePort: {{ $servicePort }} + {{- end -}} + {{- if or $ingressValues.tlsSecretName $ingressValues.tlsSecretPerReplica.enabled }} + tls: + - hosts: + - {{ $ingressValues.hostPrefix }}-{{ $i }}.{{ $ingressValues.hostDomain }} + {{- if $ingressValues.tlsSecretPerReplica.enabled }} + secretName: {{ $ingressValues.tlsSecretPerReplica.prefix }}-{{ $i }} + {{- else }} + secretName: {{ $ingressValues.tlsSecretName }} + {{- end }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/podDisruptionBudget.yaml 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/podDisruptionBudget.yaml new file mode 100755 index 000000000..1dbe809cd --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/podDisruptionBudget.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + {{- if .Values.alertmanager.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.alertmanager.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.alertmanager.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.alertmanager.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + app: alertmanager + alertmanager: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/psp-role.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/psp-role.yaml new file mode 100755 index 000000000..d64d1f813 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/psp-role.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +rules: +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} +- apiGroups: ['policy'] +{{- else }} +- apiGroups: ['extensions'] +{{- end }} + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "kube-prometheus-stack.fullname" . }}-alertmanager +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/psp-rolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/psp-rolebinding.yaml new file mode 100755 index 000000000..9248cc8dd --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/psp-rolebinding.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager +subjects: + - kind: ServiceAccount + name: {{ template "kube-prometheus-stack.alertmanager.serviceAccountName" . }} + namespace: {{ template "kube-prometheus-stack.namespace" . 
}} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/psp.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/psp.yaml new file mode 100755 index 000000000..6fa445009 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/psp.yaml @@ -0,0 +1,52 @@ +{{- if and .Values.alertmanager.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager + labels: + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager +{{- if .Values.global.rbac.pspAnnotations }} + annotations: +{{ toYaml .Values.global.rbac.pspAnnotations | indent 4 }} +{{- end }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + privileged: false + # Required to prevent escalations to root. + # allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + #requiredDropCapabilities: + # - ALL + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Permits the container to run with root privileges as well. + rule: 'RunAsAny' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 0 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} + diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/secret.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/secret.yaml new file mode 100755 index 000000000..9d9bdabfa --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/secret.yaml @@ -0,0 +1,166 @@ +{{- if and (.Values.alertmanager.enabled) (not .Values.alertmanager.alertmanagerSpec.useExistingSecret) }} +{{- if .Release.IsInstall }} +{{- $secretName := (printf "alertmanager-%s-alertmanager" (include "kube-prometheus-stack.fullname" .)) }} +{{- if (lookup "v1" "Secret" (include "kube-prometheus-stack.namespace" .) $secretName) }} +{{- required (printf "Cannot overwrite existing secret %s in namespace %s." $secretName (include "kube-prometheus-stack.namespace" .)) "" }} +{{- end }}{{- end }} +apiVersion: v1 +kind: Secret +metadata: + name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install + namespace: {{ template "kube-prometheus-stack.namespace" . }} + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-delete-policy": hook-succeeded, hook-failed + "helm.sh/hook-weight": "3" +{{- if .Values.alertmanager.secret.annotations }} +{{ toYaml .Values.alertmanager.secret.annotations | indent 4 }} +{{- end }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +data: +{{- if .Values.alertmanager.tplConfig }} + alertmanager.yaml: {{ tpl (toYaml .Values.alertmanager.config) . | b64enc | quote }} +{{- else }} + alertmanager.yaml: {{ toYaml .Values.alertmanager.config | b64enc | quote }} +{{- end}} +{{- range $key, $val := .Values.alertmanager.templateFiles }} + {{ $key }}: {{ $val | b64enc | quote }} +{{- end }} +--- +apiVersion: batch/v1 +kind: Job +metadata: + name: alertmanager-{{ template "kube-prometheus-stack.fullname" . 
}}-pre-install + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: +{{ include "kube-prometheus-stack.labels" . | indent 4 }} + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-delete-policy": hook-succeeded, hook-failed + "helm.sh/hook-weight": "5" +spec: + template: + metadata: + name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install + labels: {{ include "kube-prometheus-stack.labels" . | nindent 8 }} + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager + spec: + serviceAccountName: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install +{{- if .Values.alertmanager.secret.securityContext }} + securityContext: +{{ toYaml .Values.alertmanager.secret.securityContext | indent 8 }} +{{- end }} + containers: + - name: copy-pre-install-secret + image: {{ template "system_default_registry" . }}{{ .Values.alertmanager.secret.image.repository }}:{{ .Values.alertmanager.secret.image.tag }} + imagePullPolicy: {{ .Values.alertmanager.secret.image.pullPolicy }} + command: + - /bin/sh + - -c + - > + if kubectl get secret -n {{ template "kube-prometheus-stack.namespace" . }} alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-alertmanager > /dev/null 2>&1; then + echo "Secret already exists" + exit 1 + fi; + kubectl patch secret -n {{ template "kube-prometheus-stack.namespace" . }} --dry-run -o yaml + alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install + -p '{{ printf "{\"metadata\":{\"name\": \"alertmanager-%s-alertmanager\"}}" (include "kube-prometheus-stack.fullname" .) }}' + | kubectl apply -f -; + kubectl annotate secret -n {{ template "kube-prometheus-stack.namespace" . }} + alertmanager-{{ template "kube-prometheus-stack.fullname" . 
}}-alertmanager + helm.sh/hook- helm.sh/hook-delete-policy- helm.sh/hook-weight-; + restartPolicy: OnFailure + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install + labels: + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-delete-policy": hook-succeeded, hook-failed + "helm.sh/hook-weight": "3" +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: ['create', 'get', 'patch'] +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install + labels: + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-delete-policy": hook-succeeded, hook-failed + "helm.sh/hook-weight": "3" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install +subjects: +- kind: ServiceAccount + name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install + namespace: {{ template "kube-prometheus-stack.namespace" . }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . 
}}-alertmanager + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-delete-policy": hook-succeeded, hook-failed + "helm.sh/hook-weight": "3" +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: alertmanager-{{ template "kube-prometheus-stack.fullname" . }}-pre-install + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager + annotations: + "helm.sh/hook": pre-install + "helm.sh/hook-delete-policy": hook-succeeded, hook-failed + "helm.sh/hook-weight": "3" +spec: + privileged: false + allowPrivilegeEscalation: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/service.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/service.yaml new file mode 100755 index 000000000..bbcc60f2b --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/service.yaml @@ -0,0 +1,50 @@ +{{- if .Values.alertmanager.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager + self-monitor: {{ .Values.alertmanager.serviceMonitor.selfMonitor | quote }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.alertmanager.service.labels }} +{{ toYaml .Values.alertmanager.service.labels | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.service.annotations }} + annotations: +{{ toYaml .Values.alertmanager.service.annotations | indent 4 }} +{{- end }} +spec: +{{- if .Values.alertmanager.service.clusterIP }} + clusterIP: {{ .Values.alertmanager.service.clusterIP }} +{{- end }} +{{- if .Values.alertmanager.service.externalIPs }} + externalIPs: +{{ toYaml .Values.alertmanager.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.alertmanager.service.loadBalancerIP }} +{{- end }} +{{- if .Values.alertmanager.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.alertmanager.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: {{ .Values.alertmanager.alertmanagerSpec.portName }} + {{- if eq .Values.alertmanager.service.type "NodePort" }} + nodePort: {{ .Values.alertmanager.service.nodePort }} + {{- end }} + port: {{ .Values.alertmanager.service.port }} + targetPort: {{ .Values.alertmanager.service.targetPort }} + protocol: TCP +{{- if .Values.alertmanager.service.additionalPorts }} +{{ toYaml .Values.alertmanager.service.additionalPorts | indent 2 }} +{{- end }} + selector: + app: alertmanager + alertmanager: {{ template "kube-prometheus-stack.fullname" . 
}}-alertmanager + type: "{{ .Values.alertmanager.service.type }}" +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/serviceaccount.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/serviceaccount.yaml new file mode 100755 index 000000000..c5e6e9228 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kube-prometheus-stack.alertmanager.serviceAccountName" . }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.alertmanager.serviceAccount.annotations }} + annotations: +{{ toYaml .Values.alertmanager.serviceAccount.annotations | indent 4 }} +{{- end }} +imagePullSecrets: +{{ toYaml .Values.global.imagePullSecrets | indent 2 }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/servicemonitor.yaml new file mode 100755 index 000000000..a699accb8 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.serviceMonitor.selfMonitor }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-alertmanager + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +spec: + selector: + matchLabels: + app: {{ template "kube-prometheus-stack.name" . }}-alertmanager + release: {{ $.Release.Name | quote }} + self-monitor: "true" + namespaceSelector: + matchNames: + - {{ printf "%s" (include "kube-prometheus-stack.namespace" .) | quote }} + endpoints: + - port: {{ .Values.alertmanager.alertmanagerSpec.portName }} + {{- if .Values.alertmanager.serviceMonitor.interval }} + interval: {{ .Values.alertmanager.serviceMonitor.interval }} + {{- end }} + {{- if .Values.alertmanager.serviceMonitor.scheme }} + scheme: {{ .Values.alertmanager.serviceMonitor.scheme }} + {{- end }} + {{- if .Values.alertmanager.serviceMonitor.bearerTokenFile }} + bearerTokenFile: {{ .Values.alertmanager.serviceMonitor.bearerTokenFile }} + {{- end }} + {{- if .Values.alertmanager.serviceMonitor.tlsConfig }} + tlsConfig: {{ toYaml .Values.alertmanager.serviceMonitor.tlsConfig | nindent 6 }} + {{- end }} + path: "{{ trimSuffix "/" .Values.alertmanager.alertmanagerSpec.routePrefix }}/metrics" +{{- if .Values.alertmanager.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.alertmanager.serviceMonitor.metricRelabelings | indent 6) . 
}} +{{- end }} +{{- if .Values.alertmanager.serviceMonitor.relabelings }} + relabelings: +{{ toYaml .Values.alertmanager.serviceMonitor.relabelings | indent 6 }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/serviceperreplica.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/serviceperreplica.yaml new file mode 100755 index 000000000..0f12ae879 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/alertmanager/serviceperreplica.yaml @@ -0,0 +1,46 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.servicePerReplica.enabled }} +{{- $count := .Values.alertmanager.alertmanagerSpec.replicas | int -}} +{{- $serviceValues := .Values.alertmanager.servicePerReplica -}} +apiVersion: v1 +kind: List +metadata: + name: {{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-serviceperreplica + namespace: {{ template "kube-prometheus-stack.namespace" . 
}} +items: +{{- range $i, $e := until $count }} + - apiVersion: v1 + kind: Service + metadata: + name: {{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-{{ $i }} + namespace: {{ template "kube-prometheus-stack.namespace" $ }} + labels: + app: {{ include "kube-prometheus-stack.name" $ }}-alertmanager +{{ include "kube-prometheus-stack.labels" $ | indent 8 }} + {{- if $serviceValues.annotations }} + annotations: +{{ toYaml $serviceValues.annotations | indent 8 }} + {{- end }} + spec: + {{- if $serviceValues.clusterIP }} + clusterIP: {{ $serviceValues.clusterIP }} + {{- end }} + {{- if $serviceValues.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := $serviceValues.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} + {{- end }} + ports: + - name: {{ $.Values.alertmanager.alertmanagerSpec.portName }} + {{- if eq $serviceValues.type "NodePort" }} + nodePort: {{ $serviceValues.nodePort }} + {{- end }} + port: {{ $serviceValues.port }} + targetPort: {{ $serviceValues.targetPort }} + selector: + app: alertmanager + alertmanager: {{ template "kube-prometheus-stack.fullname" $ }}-alertmanager + statefulset.kubernetes.io/pod-name: alertmanager-{{ include "kube-prometheus-stack.fullname" $ }}-alertmanager-{{ $i }} + type: "{{ $serviceValues.type }}" +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/core-dns/service.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/core-dns/service.yaml new file mode 100755 index 000000000..f77db4199 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/core-dns/service.yaml @@ -0,0 +1,24 @@ +{{- if .Values.coreDns.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-coredns + labels: + app: {{ template "kube-prometheus-stack.name" . }}-coredns + jobLabel: coredns +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} + namespace: kube-system +spec: + clusterIP: None + ports: + - name: http-metrics + port: {{ .Values.coreDns.service.port }} + protocol: TCP + targetPort: {{ .Values.coreDns.service.targetPort }} + selector: + {{- if .Values.coreDns.service.selector }} +{{ toYaml .Values.coreDns.service.selector | indent 4 }} + {{- else}} + k8s-app: kube-dns + {{- end}} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/core-dns/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/core-dns/servicemonitor.yaml new file mode 100755 index 000000000..f34549048 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/core-dns/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if .Values.coreDns.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-coredns + namespace: "kube-system" + labels: + app: {{ template "kube-prometheus-stack.name" . }}-coredns +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + jobLabel: jobLabel + selector: + matchLabels: + app: {{ template "kube-prometheus-stack.name" . }}-coredns + release: {{ $.Release.Name | quote }} + namespaceSelector: + matchNames: + - "kube-system" + endpoints: + - port: http-metrics + {{- if .Values.coreDns.serviceMonitor.interval}} + interval: {{ .Values.coreDns.serviceMonitor.interval }} + {{- end }} + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +{{- if .Values.coreDns.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.coreDns.serviceMonitor.metricRelabelings | indent 4) . 
}} +{{- end }} +{{- if .Values.coreDns.serviceMonitor.relabelings }} + relabelings: +{{ toYaml .Values.coreDns.serviceMonitor.relabelings | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-api-server/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-api-server/servicemonitor.yaml new file mode 100755 index 000000000..b7ea3817c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-api-server/servicemonitor.yaml @@ -0,0 +1,36 @@ +{{- if .Values.kubeApiServer.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-apiserver + namespace: default + labels: + app: {{ template "kube-prometheus-stack.name" . }}-apiserver +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if .Values.kubeApiServer.serviceMonitor.interval }} + interval: {{ .Values.kubeApiServer.serviceMonitor.interval }} + {{- end }} + port: https + scheme: https +{{- if .Values.kubeApiServer.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubeApiServer.serviceMonitor.metricRelabelings | indent 6) . 
}} +{{- end }} +{{- if .Values.kubeApiServer.relabelings }} + relabelings: +{{ toYaml .Values.kubeApiServer.relabelings | indent 6 }} +{{- end }} + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + serverName: {{ .Values.kubeApiServer.tlsConfig.serverName }} + insecureSkipVerify: {{ .Values.kubeApiServer.tlsConfig.insecureSkipVerify }} + jobLabel: {{ .Values.kubeApiServer.serviceMonitor.jobLabel }} + namespaceSelector: + matchNames: + - default + selector: +{{ toYaml .Values.kubeApiServer.serviceMonitor.selector | indent 4 }} +{{- end}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-controller-manager/endpoints.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-controller-manager/endpoints.yaml new file mode 100755 index 000000000..413193028 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-controller-manager/endpoints.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.kubeControllerManager.enabled .Values.kubeControllerManager.endpoints }} +apiVersion: v1 +kind: Endpoints +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-kube-controller-manager + labels: + app: {{ template "kube-prometheus-stack.name" . }}-kube-controller-manager + k8s-app: kube-controller-manager +{{ include "kube-prometheus-stack.labels" . | indent 4 }} + namespace: kube-system +subsets: + - addresses: + {{- range .Values.kubeControllerManager.endpoints }} + - ip: {{ . 
}} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.kubeControllerManager.service.port }} + protocol: TCP +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-controller-manager/service.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-controller-manager/service.yaml new file mode 100755 index 000000000..d55ca2a10 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-controller-manager/service.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.kubeControllerManager.enabled .Values.kubeControllerManager.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-kube-controller-manager + labels: + app: {{ template "kube-prometheus-stack.name" . }}-kube-controller-manager + jobLabel: kube-controller-manager +{{ include "kube-prometheus-stack.labels" . | indent 4 }} + namespace: kube-system +spec: + clusterIP: None + ports: + - name: http-metrics + port: {{ .Values.kubeControllerManager.service.port }} + protocol: TCP + targetPort: {{ .Values.kubeControllerManager.service.targetPort }} +{{- if .Values.kubeControllerManager.endpoints }}{{- else }} + selector: + {{- if .Values.kubeControllerManager.service.selector }} +{{ toYaml .Values.kubeControllerManager.service.selector | indent 4 }} + {{- else}} + component: kube-controller-manager + {{- end}} +{{- end }} + type: ClusterIP +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-controller-manager/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-controller-manager/servicemonitor.yaml new file mode 100755 index 000000000..38e2b1970 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-controller-manager/servicemonitor.yaml @@ -0,0 +1,44 @@ +{{- if and 
.Values.kubeControllerManager.enabled .Values.kubeControllerManager.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-kube-controller-manager + namespace: "kube-system" + labels: + app: {{ template "kube-prometheus-stack.name" . }}-kube-controller-manager +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + jobLabel: jobLabel + selector: + matchLabels: + app: {{ template "kube-prometheus-stack.name" . }}-kube-controller-manager + release: {{ $.Release.Name | quote }} + namespaceSelector: + matchNames: + - "kube-system" + endpoints: + - port: http-metrics + {{- if .Values.kubeControllerManager.serviceMonitor.interval }} + interval: {{ .Values.kubeControllerManager.serviceMonitor.interval }} + {{- end }} + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if .Values.kubeControllerManager.serviceMonitor.https }} + scheme: https + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + {{- if .Values.kubeControllerManager.serviceMonitor.insecureSkipVerify }} + insecureSkipVerify: {{ .Values.kubeControllerManager.serviceMonitor.insecureSkipVerify }} + {{- end }} + {{- if .Values.kubeControllerManager.serviceMonitor.serverName }} + serverName: {{ .Values.kubeControllerManager.serviceMonitor.serverName }} + {{- end }} + {{- end }} +{{- if .Values.kubeControllerManager.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubeControllerManager.serviceMonitor.metricRelabelings | indent 4) . 
}} +{{- end }} +{{- if .Values.kubeControllerManager.serviceMonitor.relabelings }} + relabelings: +{{ toYaml .Values.kubeControllerManager.serviceMonitor.relabelings | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-dns/service.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-dns/service.yaml new file mode 100755 index 000000000..c7bf142d5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-dns/service.yaml @@ -0,0 +1,28 @@ +{{- if .Values.kubeDns.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-kube-dns + labels: + app: {{ template "kube-prometheus-stack.name" . }}-kube-dns + jobLabel: kube-dns +{{ include "kube-prometheus-stack.labels" . | indent 4 }} + namespace: kube-system +spec: + clusterIP: None + ports: + - name: http-metrics-dnsmasq + port: {{ .Values.kubeDns.service.dnsmasq.port }} + protocol: TCP + targetPort: {{ .Values.kubeDns.service.dnsmasq.targetPort }} + - name: http-metrics-skydns + port: {{ .Values.kubeDns.service.skydns.port }} + protocol: TCP + targetPort: {{ .Values.kubeDns.service.skydns.targetPort }} + selector: + {{- if .Values.kubeDns.service.selector }} +{{ toYaml .Values.kubeDns.service.selector | indent 4 }} + {{- else}} + k8s-app: kube-dns + {{- end}} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-dns/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-dns/servicemonitor.yaml new file mode 100755 index 000000000..28d06ae83 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-dns/servicemonitor.yaml @@ -0,0 +1,46 @@ +{{- if .Values.kubeDns.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template 
"kube-prometheus-stack.fullname" . }}-kube-dns + namespace: "kube-system" + labels: + app: {{ template "kube-prometheus-stack.name" . }}-kube-dns +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + jobLabel: jobLabel + selector: + matchLabels: + app: {{ template "kube-prometheus-stack.name" . }}-kube-dns + release: {{ $.Release.Name | quote }} + namespaceSelector: + matchNames: + - "kube-system" + endpoints: + - port: http-metrics-dnsmasq + {{- if .Values.kubeDns.serviceMonitor.interval }} + interval: {{ .Values.kubeDns.serviceMonitor.interval }} + {{- end }} + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +{{- if .Values.kubeDns.serviceMonitor.dnsmasqMetricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubeDns.serviceMonitor.dnsmasqMetricRelabelings | indent 4) . }} +{{- end }} +{{- if .Values.kubeDns.serviceMonitor.dnsmasqRelabelings }} + relabelings: +{{ toYaml .Values.kubeDns.serviceMonitor.dnsmasqRelabelings | indent 4 }} +{{- end }} + - port: http-metrics-skydns + {{- if .Values.kubeDns.serviceMonitor.interval }} + interval: {{ .Values.kubeDns.serviceMonitor.interval }} + {{- end }} + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +{{- if .Values.kubeDns.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubeDns.serviceMonitor.metricRelabelings | indent 4) . 
}} +{{- end }} +{{- if .Values.kubeDns.serviceMonitor.relabelings }} + relabelings: +{{ toYaml .Values.kubeDns.serviceMonitor.relabelings | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-etcd/endpoints.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-etcd/endpoints.yaml new file mode 100755 index 000000000..8f07a5cc2 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-etcd/endpoints.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.kubeEtcd.enabled .Values.kubeEtcd.endpoints }} +apiVersion: v1 +kind: Endpoints +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-kube-etcd + labels: + app: {{ template "kube-prometheus-stack.name" . }}-kube-etcd + k8s-app: etcd-server +{{ include "kube-prometheus-stack.labels" . | indent 4 }} + namespace: kube-system +subsets: + - addresses: + {{- range .Values.kubeEtcd.endpoints }} + - ip: {{ . }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.kubeEtcd.service.port }} + protocol: TCP +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-etcd/service.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-etcd/service.yaml new file mode 100755 index 000000000..b2677e280 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-etcd/service.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.kubeEtcd.enabled .Values.kubeEtcd.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-kube-etcd + labels: + app: {{ template "kube-prometheus-stack.name" . }}-kube-etcd + jobLabel: kube-etcd +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} + namespace: kube-system +spec: + clusterIP: None + ports: + - name: http-metrics + port: {{ .Values.kubeEtcd.service.port }} + protocol: TCP + targetPort: {{ .Values.kubeEtcd.service.targetPort }} +{{- if .Values.kubeEtcd.endpoints }}{{- else }} + selector: + {{- if .Values.kubeEtcd.service.selector }} +{{ toYaml .Values.kubeEtcd.service.selector | indent 4 }} + {{- else}} + component: etcd + {{- end}} +{{- end }} + type: ClusterIP +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-etcd/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-etcd/servicemonitor.yaml new file mode 100755 index 000000000..d5816f441 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-etcd/servicemonitor.yaml @@ -0,0 +1,50 @@ +{{- if and .Values.kubeEtcd.enabled .Values.kubeEtcd.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-kube-etcd + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-kube-etcd +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + jobLabel: jobLabel + selector: + matchLabels: + app: {{ template "kube-prometheus-stack.name" . 
}}-kube-etcd + release: {{ $.Release.Name | quote }} + namespaceSelector: + matchNames: + - "kube-system" + endpoints: + - port: http-metrics + {{- if .Values.kubeEtcd.serviceMonitor.interval }} + interval: {{ .Values.kubeEtcd.serviceMonitor.interval }} + {{- end }} + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if eq .Values.kubeEtcd.serviceMonitor.scheme "https" }} + scheme: https + tlsConfig: + {{- if .Values.kubeEtcd.serviceMonitor.serverName }} + serverName: {{ .Values.kubeEtcd.serviceMonitor.serverName }} + {{- end }} + {{- if .Values.kubeEtcd.serviceMonitor.caFile }} + caFile: {{ .Values.kubeEtcd.serviceMonitor.caFile }} + {{- end }} + {{- if .Values.kubeEtcd.serviceMonitor.certFile }} + certFile: {{ .Values.kubeEtcd.serviceMonitor.certFile }} + {{- end }} + {{- if .Values.kubeEtcd.serviceMonitor.keyFile }} + keyFile: {{ .Values.kubeEtcd.serviceMonitor.keyFile }} + {{- end}} + insecureSkipVerify: {{ .Values.kubeEtcd.serviceMonitor.insecureSkipVerify }} + {{- end }} +{{- if .Values.kubeEtcd.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubeEtcd.serviceMonitor.metricRelabelings | indent 4) . }} +{{- end }} +{{- if .Values.kubeEtcd.serviceMonitor.relabelings }} + relabelings: +{{ toYaml .Values.kubeEtcd.serviceMonitor.relabelings | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-proxy/endpoints.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-proxy/endpoints.yaml new file mode 100755 index 000000000..2cb756d15 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-proxy/endpoints.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.kubeProxy.enabled .Values.kubeProxy.endpoints }} +apiVersion: v1 +kind: Endpoints +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-kube-proxy + labels: + app: {{ template "kube-prometheus-stack.name" . 
}}-kube-proxy + k8s-app: kube-proxy +{{ include "kube-prometheus-stack.labels" . | indent 4 }} + namespace: kube-system +subsets: + - addresses: + {{- range .Values.kubeProxy.endpoints }} + - ip: {{ . }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.kubeProxy.service.port }} + protocol: TCP +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-proxy/service.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-proxy/service.yaml new file mode 100755 index 000000000..6a93319ef --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-proxy/service.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.kubeProxy.enabled .Values.kubeProxy.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-kube-proxy + labels: + app: {{ template "kube-prometheus-stack.name" . }}-kube-proxy + jobLabel: kube-proxy +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} + namespace: kube-system +spec: + clusterIP: None + ports: + - name: http-metrics + port: {{ .Values.kubeProxy.service.port }} + protocol: TCP + targetPort: {{ .Values.kubeProxy.service.targetPort }} +{{- if .Values.kubeProxy.endpoints }}{{- else }} + selector: + {{- if .Values.kubeProxy.service.selector }} +{{ toYaml .Values.kubeProxy.service.selector | indent 4 }} + {{- else}} + k8s-app: kube-proxy + {{- end}} +{{- end }} + type: ClusterIP +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-proxy/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-proxy/servicemonitor.yaml new file mode 100755 index 000000000..ed1632682 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-proxy/servicemonitor.yaml @@ -0,0 +1,38 @@ +{{- if and .Values.kubeProxy.enabled .Values.kubeProxy.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-kube-proxy + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-kube-proxy +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + jobLabel: jobLabel + selector: + matchLabels: + app: {{ template "kube-prometheus-stack.name" . 
}}-kube-proxy + release: {{ $.Release.Name | quote }} + namespaceSelector: + matchNames: + - "kube-system" + endpoints: + - port: http-metrics + {{- if .Values.kubeProxy.serviceMonitor.interval }} + interval: {{ .Values.kubeProxy.serviceMonitor.interval }} + {{- end }} + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if .Values.kubeProxy.serviceMonitor.https }} + scheme: https + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + {{- end}} +{{- if .Values.kubeProxy.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ toYaml .Values.kubeProxy.serviceMonitor.metricRelabelings | indent 4 }} +{{- end }} +{{- if .Values.kubeProxy.serviceMonitor.relabelings }} + relabelings: +{{ toYaml .Values.kubeProxy.serviceMonitor.relabelings | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-scheduler/endpoints.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-scheduler/endpoints.yaml new file mode 100755 index 000000000..f4ad60fd6 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-scheduler/endpoints.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.kubeScheduler.enabled .Values.kubeScheduler.endpoints }} +apiVersion: v1 +kind: Endpoints +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-kube-scheduler + labels: + app: {{ template "kube-prometheus-stack.name" . }}-kube-scheduler + k8s-app: kube-scheduler +{{ include "kube-prometheus-stack.labels" . | indent 4 }} + namespace: kube-system +subsets: + - addresses: + {{- range .Values.kubeScheduler.endpoints }} + - ip: {{ . 
}} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.kubeScheduler.service.port }} + protocol: TCP +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-scheduler/service.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-scheduler/service.yaml new file mode 100755 index 000000000..7a9c53da0 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-scheduler/service.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.kubeScheduler.enabled .Values.kubeScheduler.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-kube-scheduler + labels: + app: {{ template "kube-prometheus-stack.name" . }}-kube-scheduler + jobLabel: kube-scheduler +{{ include "kube-prometheus-stack.labels" . | indent 4 }} + namespace: kube-system +spec: + clusterIP: None + ports: + - name: http-metrics + port: {{ .Values.kubeScheduler.service.port}} + protocol: TCP + targetPort: {{ .Values.kubeScheduler.service.targetPort}} +{{- if .Values.kubeScheduler.endpoints }}{{- else }} + selector: + {{- if .Values.kubeScheduler.service.selector }} +{{ toYaml .Values.kubeScheduler.service.selector | indent 4 }} + {{- else}} + component: kube-scheduler + {{- end}} +{{- end }} + type: ClusterIP +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-scheduler/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-scheduler/servicemonitor.yaml new file mode 100755 index 000000000..7caef4f58 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-scheduler/servicemonitor.yaml @@ -0,0 +1,44 @@ +{{- if and .Values.kubeScheduler.enabled .Values.kubeScheduler.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template 
"kube-prometheus-stack.fullname" . }}-kube-scheduler + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-kube-scheduler +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + jobLabel: jobLabel + selector: + matchLabels: + app: {{ template "kube-prometheus-stack.name" . }}-kube-scheduler + release: {{ $.Release.Name | quote }} + namespaceSelector: + matchNames: + - "kube-system" + endpoints: + - port: http-metrics + {{- if .Values.kubeScheduler.serviceMonitor.interval }} + interval: {{ .Values.kubeScheduler.serviceMonitor.interval }} + {{- end }} + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if .Values.kubeScheduler.serviceMonitor.https }} + scheme: https + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + {{- if .Values.kubeScheduler.serviceMonitor.insecureSkipVerify }} + insecureSkipVerify: {{ .Values.kubeScheduler.serviceMonitor.insecureSkipVerify }} + {{- end}} + {{- if .Values.kubeScheduler.serviceMonitor.serverName }} + serverName: {{ .Values.kubeScheduler.serviceMonitor.serverName }} + {{- end}} + {{- end}} +{{- if .Values.kubeScheduler.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubeScheduler.serviceMonitor.metricRelabelings | indent 4) . 
}} +{{- end }} +{{- if .Values.kubeScheduler.serviceMonitor.relabelings }} + relabelings: +{{ toYaml .Values.kubeScheduler.serviceMonitor.relabelings | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-state-metrics/serviceMonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-state-metrics/serviceMonitor.yaml new file mode 100755 index 000000000..5b723b214 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kube-state-metrics/serviceMonitor.yaml @@ -0,0 +1,34 @@ +{{- if .Values.kubeStateMetrics.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-kube-state-metrics + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-kube-state-metrics +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + jobLabel: app.kubernetes.io/name + endpoints: + - port: http + {{- if .Values.kubeStateMetrics.serviceMonitor.interval }} + interval: {{ .Values.kubeStateMetrics.serviceMonitor.interval }} + {{- end }} + honorLabels: true +{{- if .Values.kubeStateMetrics.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubeStateMetrics.serviceMonitor.metricRelabelings | indent 4) . 
}} +{{- end }} +{{- if .Values.kubeStateMetrics.serviceMonitor.relabelings }} + relabelings: +{{ toYaml .Values.kubeStateMetrics.serviceMonitor.relabelings | indent 4 }} +{{- end }} + selector: + matchLabels: +{{- if .Values.kubeStateMetrics.serviceMonitor.selectorOverride }} +{{ toYaml .Values.kubeStateMetrics.serviceMonitor.selectorOverride | indent 6 }} +{{ else }} + app.kubernetes.io/name: kube-state-metrics + app.kubernetes.io/instance: "{{ $.Release.Name }}" +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kubelet/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kubelet/servicemonitor.yaml new file mode 100755 index 000000000..15811312c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/kubelet/servicemonitor.yaml @@ -0,0 +1,151 @@ +{{- if .Values.kubelet.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-kubelet + namespace: {{ .Values.kubelet.namespace }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-kubelet +{{- include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + endpoints: + {{- if .Values.kubelet.serviceMonitor.https }} + - port: https-metrics + scheme: https + {{- if .Values.kubelet.serviceMonitor.interval }} + interval: {{ .Values.kubelet.serviceMonitor.interval }} + {{- end }} + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecureSkipVerify: true + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + honorLabels: true +{{- if .Values.kubelet.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubelet.serviceMonitor.metricRelabelings | indent 4) . 
}} +{{- end }} +{{- if .Values.kubelet.serviceMonitor.relabelings }} + relabelings: +{{ toYaml .Values.kubelet.serviceMonitor.relabelings | indent 4 }} +{{- end }} +{{- if .Values.kubelet.serviceMonitor.cAdvisor }} + - port: https-metrics + scheme: https + path: /metrics/cadvisor + {{- if .Values.kubelet.serviceMonitor.interval }} + interval: {{ .Values.kubelet.serviceMonitor.interval }} + {{- end }} + {{- if .Values.kubelet.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.kubelet.serviceMonitor.scrapeTimeout }} + {{- end }} + honorLabels: true + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecureSkipVerify: true + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +{{- if .Values.kubelet.serviceMonitor.cAdvisorMetricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubelet.serviceMonitor.cAdvisorMetricRelabelings | indent 4) . }} +{{- end }} +{{- if .Values.kubelet.serviceMonitor.cAdvisorRelabelings }} + relabelings: +{{ toYaml .Values.kubelet.serviceMonitor.cAdvisorRelabelings | indent 4 }} +{{- end }} +{{- end }} +{{- if .Values.kubelet.serviceMonitor.probes }} + - port: https-metrics + scheme: https + path: /metrics/probes + {{- if .Values.kubelet.serviceMonitor.interval }} + interval: {{ .Values.kubelet.serviceMonitor.interval }} + {{- end }} + honorLabels: true + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecureSkipVerify: true + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +{{- if .Values.kubelet.serviceMonitor.probesMetricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubelet.serviceMonitor.probesMetricRelabelings | indent 4) . 
}} +{{- end }} +{{- if .Values.kubelet.serviceMonitor.probesRelabelings }} + relabelings: +{{ toYaml .Values.kubelet.serviceMonitor.probesRelabelings | indent 4 }} +{{- end }} +{{- end }} +{{- if .Values.kubelet.serviceMonitor.resource }} + - port: https-metrics + scheme: https + path: {{ include "kubelet.serviceMonitor.resourcePath" . }} + {{- if .Values.kubelet.serviceMonitor.interval }} + interval: {{ .Values.kubelet.serviceMonitor.interval }} + {{- end }} + honorLabels: true + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + insecureSkipVerify: true + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +{{- if .Values.kubelet.serviceMonitor.resourceMetricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubelet.serviceMonitor.resourceMetricRelabelings | indent 4) . }} +{{- end }} +{{- if .Values.kubelet.serviceMonitor.resourceRelabelings }} + relabelings: +{{ toYaml .Values.kubelet.serviceMonitor.resourceRelabelings | indent 4 }} +{{- end }} +{{- end }} + {{- else }} + - port: http-metrics + {{- if .Values.kubelet.serviceMonitor.interval }} + interval: {{ .Values.kubelet.serviceMonitor.interval }} + {{- end }} + honorLabels: true +{{- if .Values.kubelet.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubelet.serviceMonitor.metricRelabelings | indent 4) . }} +{{- end }} +{{- if .Values.kubelet.serviceMonitor.relabelings }} + relabelings: +{{ toYaml .Values.kubelet.serviceMonitor.relabelings | indent 4 }} +{{- end }} +{{- if .Values.kubelet.serviceMonitor.cAdvisor }} + - port: http-metrics + path: /metrics/cadvisor + {{- if .Values.kubelet.serviceMonitor.interval }} + interval: {{ .Values.kubelet.serviceMonitor.interval }} + {{- end }} + honorLabels: true +{{- if .Values.kubelet.serviceMonitor.cAdvisorMetricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubelet.serviceMonitor.cAdvisorMetricRelabelings | indent 4) . 
}} +{{- end }} +{{- if .Values.kubelet.serviceMonitor.cAdvisorRelabelings }} + relabelings: +{{ toYaml .Values.kubelet.serviceMonitor.cAdvisorRelabelings | indent 4 }} +{{- end }} +{{- if .Values.kubelet.serviceMonitor.resource }} + - port: http-metrics + path: {{ include "kubelet.serviceMonitor.resourcePath" . }} + {{- if .Values.kubelet.serviceMonitor.interval }} + interval: {{ .Values.kubelet.serviceMonitor.interval }} + {{- end }} + honorLabels: true +{{- if .Values.kubelet.serviceMonitor.resourceMetricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.kubelet.serviceMonitor.resourceMetricRelabelings | indent 4) . }} +{{- end }} +{{- if .Values.kubelet.serviceMonitor.resourceRelabelings }} + relabelings: +{{ toYaml .Values.kubelet.serviceMonitor.resourceRelabelings | indent 4 }} +{{- end }} +{{- end }} +{{- end }} + {{- end }} + jobLabel: k8s-app + namespaceSelector: + matchNames: + - {{ .Values.kubelet.namespace }} + selector: + matchLabels: + k8s-app: kubelet +{{- end}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/node-exporter/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/node-exporter/servicemonitor.yaml new file mode 100755 index 000000000..5ca5f1b75 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/exporters/node-exporter/servicemonitor.yaml @@ -0,0 +1,32 @@ +{{- if .Values.nodeExporter.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-node-exporter + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-node-exporter +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +spec: + jobLabel: {{ .Values.nodeExporter.jobLabel }} + selector: + matchLabels: + app: prometheus-node-exporter + release: {{ $.Release.Name }} + endpoints: + - port: metrics + {{- if .Values.nodeExporter.serviceMonitor.interval }} + interval: {{ .Values.nodeExporter.serviceMonitor.interval }} + {{- end }} + {{- if .Values.nodeExporter.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.nodeExporter.serviceMonitor.scrapeTimeout }} + {{- end }} +{{- if .Values.nodeExporter.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.nodeExporter.serviceMonitor.metricRelabelings | indent 4) . }} +{{- end }} +{{- if .Values.nodeExporter.serviceMonitor.relabelings }} + relabelings: +{{ toYaml .Values.nodeExporter.serviceMonitor.relabelings | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/configmap-dashboards.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/configmap-dashboards.yaml new file mode 100755 index 000000000..f11af8285 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/configmap-dashboards.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- $files := .Files.Glob "dashboards/*.json" }} +{{- if $files }} +apiVersion: v1 +kind: ConfigMapList +items: +{{- range $path, $fileContents := $files }} +{{- $dashboardName := regexReplaceAll "(^.*/)(.*)\\.json$" $path "${2}" }} +- apiVersion: v1 + kind: ConfigMap + metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) $dashboardName | trunc 63 | trimSuffix "-" }} + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" 
$ | indent 6 }} + data: + {{ $dashboardName }}.json: {{ $.Files.Get $path | toJson }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/configmaps-datasources.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/configmaps-datasources.yaml new file mode 100755 index 000000000..c6700d84e --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/configmaps-datasources.yaml @@ -0,0 +1,43 @@ +{{- if and .Values.grafana.enabled .Values.grafana.sidecar.datasources.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-grafana-datasource + namespace: {{ default .Values.grafana.sidecar.datasources.searchNamespace (include "kube-prometheus-stack.namespace" .) }} +{{- if .Values.grafana.sidecar.datasources.annotations }} + annotations: +{{ toYaml .Values.grafana.sidecar.datasources.annotations | indent 4 }} +{{- end }} + labels: + {{ $.Values.grafana.sidecar.datasources.label }}: "1" + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + datasource.yaml: |- + apiVersion: 1 + datasources: +{{- $scrapeInterval := .Values.grafana.sidecar.datasources.defaultDatasourceScrapeInterval | default .Values.prometheus.prometheusSpec.scrapeInterval | default "30s" }} +{{- if .Values.grafana.sidecar.datasources.defaultDatasourceEnabled }} + - name: Prometheus + type: prometheus + url: http://{{ template "kube-prometheus-stack.fullname" . }}-prometheus:{{ .Values.prometheus.service.port }}/{{ trimPrefix "/" .Values.prometheus.prometheusSpec.routePrefix }} + access: proxy + isDefault: true + jsonData: + timeInterval: {{ $scrapeInterval }} +{{- if .Values.grafana.sidecar.datasources.createPrometheusReplicasDatasources }} +{{- range until (int .Values.prometheus.prometheusSpec.replicas) }} + - name: Prometheus-{{ . 
}} + type: prometheus + url: http://prometheus-{{ template "kube-prometheus-stack.fullname" $ }}-prometheus-{{ . }}.prometheus-operated:9090/{{ trimPrefix "/" $.Values.prometheus.prometheusSpec.routePrefix }} + access: proxy + isDefault: false + jsonData: + timeInterval: {{ $scrapeInterval }} +{{- end }} +{{- end }} +{{- end }} +{{- if .Values.grafana.additionalDataSources }} +{{ tpl (toYaml .Values.grafana.additionalDataSources | indent 4) . }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/apiserver.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/apiserver.yaml new file mode 100755 index 000000000..efed40873 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/apiserver.yaml @@ -0,0 +1,1747 @@ +{{- /* +Generated from 'apiserver' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.kubeApiServer.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "apiserver" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + apiserver.json: |- + { + "__inputs": [ + + ], + "__requires": [ + + ], + "annotations": { + "list": [ + + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ + + ], + "panels": [ + { + "content": "The SLO (service level objective) and other metrics displayed on this dashboard are for informational purposes only.", + "datasource": null, + "description": "The SLO (service level objective) and other metrics displayed on this dashboard are for informational purposes only.", + "gridPos": { + "h": 2, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "mode": "markdown", + "span": 12, + "title": "Notice", + "type": "text" + } + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + 
"datasource": "$datasource", + "decimals": 3, + "description": "How many percent of requests (both read and write) in 30 days have been answered successfully and fast enough?", + "format": "percentunit", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 3, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "apiserver_request:availability30d{verb=\"all\", cluster=\"$cluster\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Availability (30d) > 99.000%", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "decimals": 3, + "description": "How much error budget is left looking at our 0.990% availability guarantees?", + "fill": 10, + "fillGradient": 0, + "gridPos": { + + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + 
"percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 8, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "100 * (apiserver_request:availability30d{verb=\"all\", cluster=\"$cluster\"} - 0.990000)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "errorbudget", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "ErrorBudget (30d) > 99.000%", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "decimals": 3, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "decimals": 3, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 3, + "description": "How many percent of read requests (LIST,GET) in 30 days have been answered successfully and fast enough?", + "format": "percentunit", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 5, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": 
"connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "apiserver_request:availability30d{verb=\"read\", cluster=\"$cluster\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Read Availability (30d)", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "How many read requests (LIST,GET) per second do the apiservers get by code?", + "fill": 10, + "fillGradient": 0, + "gridPos": { + + }, + "id": 6, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/2../i", + "color": "#56A64B" + }, + { + "alias": "/3../i", + "color": "#F2CC0C" + }, + { + "alias": "/4../i", + "color": "#3274D9" + }, + { + "alias": "/5../i", + "color": "#E02F44" + } + ], + "spaceLength": 10, + "span": 3, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (code) (code_resource:apiserver_request_total:rate5m{verb=\"read\", cluster=\"$cluster\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": 
"{{`{{`}} code {{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Read SLI - Requests", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "reqps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "reqps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "How many percent of read requests (LIST,GET) per second are returned with errors (5xx)?", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 7, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (resource) (code_resource:apiserver_request_total:rate5m{verb=\"read\",code=~\"5..\", cluster=\"$cluster\"}) / sum by (resource) (code_resource:apiserver_request_total:rate5m{verb=\"read\", cluster=\"$cluster\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}} resource {{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Read SLI - Errors", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": 
"time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "How many seconds is the 99th percentile for reading (LIST|GET) a given resource?", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 8, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "cluster_quantile:apiserver_request_duration_seconds:histogram_quantile{verb=\"read\", cluster=\"$cluster\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}} resource {{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Read SLI - Duration", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": 
"Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 3, + "description": "How many percent of write requests (POST|PUT|PATCH|DELETE) in 30 days have been answered successfully and fast enough?", + "format": "percentunit", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 9, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "apiserver_request:availability30d{verb=\"write\", cluster=\"$cluster\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Write Availability (30d)", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "How many write requests (POST|PUT|PATCH|DELETE) per second do the apiservers get by code?", + "fill": 10, + "fillGradient": 0, + "gridPos": { + + }, + "id": 10, + "legend": 
{ + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/2../i", + "color": "#56A64B" + }, + { + "alias": "/3../i", + "color": "#F2CC0C" + }, + { + "alias": "/4../i", + "color": "#3274D9" + }, + { + "alias": "/5../i", + "color": "#E02F44" + } + ], + "spaceLength": 10, + "span": 3, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (code) (code_resource:apiserver_request_total:rate5m{verb=\"write\", cluster=\"$cluster\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}} code {{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Write SLI - Requests", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "reqps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "reqps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "How many percent of write requests (POST|PUT|PATCH|DELETE) per second are returned with errors (5xx)?", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 11, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + 
"lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (resource) (code_resource:apiserver_request_total:rate5m{verb=\"write\",code=~\"5..\", cluster=\"$cluster\"}) / sum by (resource) (code_resource:apiserver_request_total:rate5m{verb=\"write\", cluster=\"$cluster\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}} resource {{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Write SLI - Errors", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "How many seconds is the 99th percentile for writing (POST|PUT|PATCH|DELETE) a given resource?", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 3, + "stack": false, + 
"steppedLine": false, + "targets": [ + { + "expr": "cluster_quantile:apiserver_request_duration_seconds:histogram_quantile{verb=\"write\", cluster=\"$cluster\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}} resource {{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Write SLI - Duration", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 13, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(workqueue_adds_total{job=\"apiserver\", instance=~\"$instance\", cluster=\"$cluster\"}[5m])) by (instance, name)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} 
{{`{{`}}name{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Work Queue Add Rate", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 14, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(workqueue_depth{job=\"apiserver\", instance=~\"$instance\", cluster=\"$cluster\"}[5m])) by (instance, name)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}name{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Work Queue Depth", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": 
"short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 15, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(workqueue_queue_duration_seconds_bucket{job=\"apiserver\", instance=~\"$instance\", cluster=\"$cluster\"}[5m])) by (instance, name, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}name{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Work Queue Latency", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + 
"fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 16, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "process_resident_memory_bytes{job=\"apiserver\",instance=~\"$instance\", cluster=\"$cluster\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 17, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, 
+ "steppedLine": false, + "targets": [ + { + "expr": "rate(process_cpu_seconds_total{job=\"apiserver\",instance=~\"$instance\", cluster=\"$cluster\"}[5m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 18, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_goroutines{job=\"apiserver\",instance=~\"$instance\", cluster=\"$cluster\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Goroutines", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + 
"values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(apiserver_request_total, cluster)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "instance", + "options": [ + + ], + "query": "label_values(apiserver_request_total{job=\"apiserver\", cluster=\"$cluster\"}, instance)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": 
"UTC", + "title": "Kubernetes / API server", + "uid": "09ec8aa1e996d6ffcd6817bbaff4db1b", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/cluster-total.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/cluster-total.yaml new file mode 100755 index 000000000..fde561c82 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/cluster-total.yaml @@ -0,0 +1,1882 @@ +{{- /* +Generated from 'cluster-total' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "cluster-total" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + cluster-total.json: |- + { + "__inputs": [ + + ], + "__requires": [ + + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 
211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ + + ], + "panels": [ + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [ + + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Current Bandwidth", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": { + + }, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 3, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\",namespace=~\".+\"}[$interval:$resolution])) by (namespace))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Rate of Bytes Received", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "series", + "name": null, + "show": false, + "values": [ + "current" + ] + }, + "yaxes": 
[ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 4, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\",namespace=~\".+\"}[$interval:$resolution])) by (namespace))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Rate of Bytes Transmitted", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "series", + "name": null, + "show": false, + "values": [ + "current" + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "columns": [ + { + "text": "Time", + "value": "Time" + }, + { + "text": 
"Value #A", + "value": "Value #A" + }, + { + "text": "Value #B", + "value": "Value #B" + }, + { + "text": "Value #C", + "value": "Value #C" + }, + { + "text": "Value #D", + "value": "Value #D" + }, + { + "text": "Value #E", + "value": "Value #E" + }, + { + "text": "Value #F", + "value": "Value #F" + }, + { + "text": "Value #G", + "value": "Value #G" + }, + { + "text": "Value #H", + "value": "Value #H" + }, + { + "text": "namespace", + "value": "namespace" + } + ], + "datasource": "$datasource", + "fill": 1, + "fontSize": "90%", + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 10 + }, + "id": 5, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "null as zero", + "renderer": "flot", + "scroll": true, + "showHeader": true, + "sort": { + "col": 0, + "desc": false + }, + "spaceLength": 10, + "span": 24, + "styles": [ + { + "alias": "Time", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Time", + "thresholds": [ + + ], + "type": "hidden", + "unit": "short" + }, + { + "alias": "Current Bandwidth Received", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Current Bandwidth Transmitted", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Average Bandwidth Received", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + 
], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Average Bandwidth Transmitted", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Rate of Received Packets", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Transmitted Packets", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Received Packets Dropped", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #G", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Transmitted Packets Dropped", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #H", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Namespace", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTooltip": "Drill down", + "linkUrl": "d/8b7a8b326d7a6f1f04244066368c67af/kubernetes-networking-namespace-pods?orgId=1&refresh=30s&var-namespace=$__cell", + "pattern": "namespace", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + } + ], + "targets": [ + { + "expr": 
"sort_desc(sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\",namespace=~\".+\"}[$interval:$resolution])) by (namespace))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sort_desc(sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\",namespace=~\".+\"}[$interval:$resolution])) by (namespace))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sort_desc(avg(irate(container_network_receive_bytes_total{cluster=\"$cluster\",namespace=~\".+\"}[$interval:$resolution])) by (namespace))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sort_desc(avg(irate(container_network_transmit_bytes_total{cluster=\"$cluster\",namespace=~\".+\"}[$interval:$resolution])) by (namespace))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sort_desc(sum(irate(container_network_receive_packets_total{cluster=\"$cluster\",namespace=~\".+\"}[$interval:$resolution])) by (namespace))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sort_desc(sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\",namespace=~\".+\"}[$interval:$resolution])) by (namespace))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "expr": "sort_desc(sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\",namespace=~\".+\"}[$interval:$resolution])) by (namespace))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + }, + { + "expr": 
"sort_desc(sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\",namespace=~\".+\"}[$interval:$resolution])) by (namespace))", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "H", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Status", + "type": "table" + }, + { + "collapse": true, + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 10 + }, + "id": 6, + "panels": [ + { + "aliasColors": { + + }, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 11 + }, + "id": 7, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(avg(irate(container_network_receive_bytes_total{cluster=\"$cluster\",namespace=~\".+\"}[$interval:$resolution])) by (namespace))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Average Rate of Bytes Received", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "series", + "name": null, + "show": false, + "values": [ + "current" + ] + }, + "yaxes": [ + { + 
"format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 11 + }, + "id": 8, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(avg(irate(container_network_transmit_bytes_total{cluster=\"$cluster\",namespace=~\".+\"}[$interval:$resolution])) by (namespace))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Average Rate of Bytes Transmitted", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "series", + "name": null, + "show": false, + "values": [ + "current" + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": 
true, + "title": "Average Bandwidth", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 11 + }, + "id": 9, + "panels": [ + + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Bandwidth History", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 12 + }, + "id": 10, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\",namespace=~\".+\"}[$interval:$resolution])) by (namespace))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Receive Bandwidth", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": 
true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 21 + }, + "id": 11, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\",namespace=~\".+\"}[$interval:$resolution])) by (namespace))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Transmit Bandwidth", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "collapse": true, + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 30 + }, + "id": 12, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 31 + }, + 
"id": 13, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_receive_packets_total{cluster=\"$cluster\",namespace=~\".+\"}[$interval:$resolution])) by (namespace))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 40 + }, + "id": 14, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "connected", + 
"paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\",namespace=~\".+\"}[$interval:$resolution])) by (namespace))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Packets", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": true, + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 31 + }, + "id": 15, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 50 + }, + "id": 16, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + 
"points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\",namespace=~\".+\"}[$interval:$resolution])) by (namespace))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets Dropped", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 59 + }, + "id": 17, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\",namespace=~\".+\"}[$interval:$resolution])) by 
(namespace))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets Dropped", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 59 + }, + "id": 18, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + { + "targetBlank": true, + "title": "What is TCP Retransmit?", + "url": "https://accedian.com/enterprises/blog/network-packet-loss-retransmissions-and-duplicate-acknowledgements/" + } + ], + "minSpan": 24, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(rate(node_netstat_Tcp_RetransSegs{cluster=\"$cluster\"}[$interval:$resolution]) / rate(node_netstat_Tcp_OutSegs{cluster=\"$cluster\"}[$interval:$resolution])) by (instance))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + 
"refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of TCP Retransmits out of all sent segments", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 59 + }, + "id": 19, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [ + { + "targetBlank": true, + "title": "Why monitor SYN retransmits?", + "url": "https://github.com/prometheus/node_exporter/issues/1023#issuecomment-408128365" + } + ], + "minSpan": 24, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(rate(node_netstat_TcpExt_TCPSynRetrans{cluster=\"$cluster\"}[$interval:$resolution]) / rate(node_netstat_Tcp_RetransSegs{cluster=\"$cluster\"}[$interval:$resolution])) by (instance))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": 
null, + "title": "Rate of TCP SYN Retransmits out of all retransmits", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Errors", + "titleSize": "h6", + "type": "row" + } + ], + "refresh": "10s", + "rows": [ + + ], + "schemaVersion": 18, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "5m", + "value": "5m" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "resolution", + "options": [ + { + "selected": false, + "text": "30s", + "value": "30s" + }, + { + "selected": true, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + } + ], + "query": "30s,5m,1h", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "interval", + "useTags": false + }, + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "5m", + "value": "5m" + }, + "datasource": "$datasource", + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "interval", + "options": [ + { + "selected": true, + "text": "4h", + "value": "4h" + } + ], + "query": "4h", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": 
"interval", + "useTags": false + }, + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kube_pod_info, cluster)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "UTC", + "title": "Kubernetes / Networking / Cluster", + "uid": "ff635a025bcfea7bc3dd4f508990a3e9", + "version": 0 + } +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/controller-manager.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/controller-manager.yaml new file mode 100755 index 000000000..675cbe618 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/controller-manager.yaml @@ -0,0 +1,1153 @@ +{{- /* +Generated from 'controller-manager' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if (include "exporter.kubeControllerManager.enabled" .)}} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "controller-manager" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + controller-manager.json: |- + { + "__inputs": [ + + ], + "__requires": [ + + ], + "annotations": { + "list": [ + + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 2, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + 
"nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(up{job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Up", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "min" + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 3, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(workqueue_adds_total{job=\"{{ include "exporter.kubeControllerManager.jobName" . 
}}\", instance=~\"$instance\"}[5m])) by (instance, name)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}name{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Work Queue Add Rate", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 4, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(workqueue_depth{job=\"{{ include "exporter.kubeControllerManager.jobName" . 
}}\", instance=~\"$instance\"}[5m])) by (instance, name)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}name{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Work Queue Depth", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 5, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(workqueue_queue_duration_seconds_bucket{job=\"{{ include "exporter.kubeControllerManager.jobName" . 
}}\", instance=~\"$instance\"}[5m])) by (instance, name, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}name{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Work Queue Latency", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 6, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeControllerManager.jobName" . 
}}\", instance=~\"$instance\",code=~\"2..\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "2xx", + "refId": "A" + }, + { + "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\", instance=~\"$instance\",code=~\"3..\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "3xx", + "refId": "B" + }, + { + "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\", instance=~\"$instance\",code=~\"4..\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "4xx", + "refId": "C" + }, + { + "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\", instance=~\"$instance\",code=~\"5..\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "5xx", + "refId": "D" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Kube API Request Rate", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 7, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": 
false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 8, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\", instance=~\"$instance\", verb=\"POST\"}[5m])) by (verb, url, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}verb{{`}}`}} {{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Post Request Latency 99th Quantile", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 8, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + 
"expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{job=\"{{ include "exporter.kubeControllerManager.jobName" . }}\", instance=~\"$instance\", verb=\"GET\"}[5m])) by (verb, url, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}verb{{`}}`}} {{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Get Request Latency 99th Quantile", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "process_resident_memory_bytes{job=\"{{ include "exporter.kubeControllerManager.jobName" . 
}}\",instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 10, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(process_cpu_seconds_total{job=\"{{ include "exporter.kubeControllerManager.jobName" . 
}}\",instance=~\"$instance\"}[5m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 11, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_goroutines{job=\"{{ include "exporter.kubeControllerManager.jobName" . 
}}\",instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Goroutines", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "instance", + "options": [ + + ], + "query": "label_values(process_cpu_seconds_total{job=\"{{ include "exporter.kubeControllerManager.jobName" . 
}}\"}, instance)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "UTC", + "title": "Kubernetes / Controller Manager", + "uid": "72e0e05bef5099e5f049b05fdc429ed4", + "version": 0 + } +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/etcd.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/etcd.yaml new file mode 100755 index 000000000..78f230581 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/etcd.yaml @@ -0,0 +1,1118 @@ +{{- /* +Generated from 'etcd' from https://raw.githubusercontent.com/etcd-io/website/master/content/docs/current/op-guide/grafana.json +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if (include "exporter.kubeEtcd.enabled" .)}} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "etcd" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + etcd.json: |- + { + "annotations": { + "list": [] + }, + "description": "etcd sample Grafana dashboard with Prometheus", + "editable": true, + "gnetId": null, + "hideControls": false, + "links": [], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "editable": true, + "height": "250px", + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "$datasource", + "editable": true, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 28, + "interval": null, + "isNew": true, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + 
"nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "sum(etcd_server_has_leader{job=\"$cluster\"})", + "intervalFactor": 2, + "legendFormat": "", + "metric": "etcd_server_has_leader", + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Up", + "type": "singlestat", + "valueFontSize": "200%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fill": 0, + "id": 23, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 5, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(grpc_server_started_total{job=\"$cluster\",grpc_type=\"unary\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "RPC Rate", + "metric": "grpc_server_started_total", + "refId": "A", + "step": 2 + }, + { + "expr": "sum(rate(grpc_server_handled_total{job=\"$cluster\",grpc_type=\"unary\",grpc_code!=\"OK\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "RPC Failed Rate", + "metric": "grpc_server_handled_total", + "refId": "B", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "RPC Rate", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + 
"value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fill": 0, + "id": 41, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 4, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(grpc_server_started_total{job=\"$cluster\",grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{job=\"$cluster\",grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"})", + "intervalFactor": 2, + "legendFormat": "Watch Streams", + "metric": "grpc_server_handled_total", + "refId": "A", + "step": 4 + }, + { + "expr": "sum(grpc_server_started_total{job=\"$cluster\",grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{job=\"$cluster\",grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"})", + "intervalFactor": 2, + "legendFormat": "Lease Streams", + "metric": "grpc_server_handled_total", + "refId": "B", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Active Streams", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": 
"short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "showTitle": false, + "title": "Row" + }, + { + "collapse": false, + "editable": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "decimals": null, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "etcd_mvcc_db_total_size_in_bytes{job=\"$cluster\"}", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} DB Size", + "metric": "", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "DB Size", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": 
"connected", + "percentage": false, + "pointradius": 1, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 4, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket{job=\"$cluster\"}[5m])) by (instance, le))", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} WAL fsync", + "metric": "etcd_disk_wal_fsync_duration_seconds_bucket", + "refId": "A", + "step": 4 + }, + { + "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket{job=\"$cluster\"}[5m])) by (instance, le))", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} DB fsync", + "metric": "etcd_disk_backend_commit_duration_seconds_bucket", + "refId": "B", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Disk Sync Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fill": 0, + "id": 29, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "process_resident_memory_bytes{job=\"$cluster\"}", + "intervalFactor": 2, + "legendFormat": 
"{{`{{`}}instance{{`}}`}} Resident Memory", + "metric": "process_resident_memory_bytes", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Memory", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "title": "New row" + }, + { + "collapse": false, + "editable": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fill": 5, + "id": 22, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 3, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "rate(etcd_network_client_grpc_received_bytes_total{job=\"$cluster\"}[5m])", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} Client Traffic In", + "metric": "etcd_network_client_grpc_received_bytes_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Client Traffic In", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": 
true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fill": 5, + "id": 21, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 3, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "rate(etcd_network_client_grpc_sent_bytes_total{job=\"$cluster\"}[5m])", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} Client Traffic Out", + "metric": "etcd_network_client_grpc_sent_bytes_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Client Traffic Out", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fill": 0, + "id": 20, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 3, + "stack": false, 
+ "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(etcd_network_peer_received_bytes_total{job=\"$cluster\"}[5m])) by (instance)", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} Peer Traffic In", + "metric": "etcd_network_peer_received_bytes_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Peer Traffic In", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "decimals": null, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 16, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(etcd_network_peer_sent_bytes_total{job=\"$cluster\"}[5m])) by (instance)", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} Peer Traffic Out", + "metric": "etcd_network_peer_sent_bytes_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Peer Traffic Out", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + 
"show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "title": "New row" + }, + { + "collapse": false, + "editable": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fill": 0, + "id": 40, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(etcd_server_proposals_failed_total{job=\"$cluster\"}[5m]))", + "intervalFactor": 2, + "legendFormat": "Proposal Failure Rate", + "metric": "etcd_server_proposals_failed_total", + "refId": "A", + "step": 2 + }, + { + "expr": "sum(etcd_server_proposals_pending{job=\"$cluster\"})", + "intervalFactor": 2, + "legendFormat": "Proposal Pending Total", + "metric": "etcd_server_proposals_pending", + "refId": "B", + "step": 2 + }, + { + "expr": "sum(rate(etcd_server_proposals_committed_total{job=\"$cluster\"}[5m]))", + "intervalFactor": 2, + "legendFormat": "Proposal Commit Rate", + "metric": "etcd_server_proposals_committed_total", + "refId": "C", + "step": 2 + }, + { + "expr": "sum(rate(etcd_server_proposals_applied_total{job=\"$cluster\"}[5m]))", + "intervalFactor": 2, + "legendFormat": "Proposal Apply Rate", + "refId": "D", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Raft Proposals", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": 
{ + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "decimals": 0, + "editable": true, + "error": false, + "fill": 0, + "id": 19, + "isNew": true, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "changes(etcd_server_leader_changes_seen_total{job=\"$cluster\"}[1d])", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} Total Leader Elections Per Day", + "metric": "etcd_server_leader_changes_seen_total", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Total Leader Elections Per Day", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "title": "New row" + } + ], + "schemaVersion": 13, + "sharedCrosshair": false, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "label": 
null, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": "label_values(etcd_server_has_leader, job)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "now": true, + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "etcd", + "uid": "c2f4e12cdf69feb95caa41a5a1b423d9", + "version": 215 + } +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-coredns.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-coredns.yaml new file mode 100755 index 000000000..8e4eaec61 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-coredns.yaml @@ -0,0 +1,1529 @@ +{{- /* Added manually, can be changed in-place. 
*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.coreDns.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-coredns" | trunc 63 | trimSuffix "-" }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + k8s-coredns.json: |- + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "A dashboard for the CoreDNS DNS server with updated metrics for version 1.7.0+. 
Based on the CoreDNS dashboard by buhay.", + "editable": true, + "gnetId": 12539, + "graphTooltip": 0, + "iteration": 1603798405693, + "links": [ + { + "icon": "external link", + "tags": [], + "targetBlank": true, + "title": "CoreDNS.io", + "type": "link", + "url": "https://coredns.io" + } + ], + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.2.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(coredns_dns_request_count_total{instance=~\"$instance\"}[5m])) by (proto) or\nsum(rate(coredns_dns_requests_total{instance=~\"$instance\"}[5m])) by (proto)", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{"{{proto}}"}}", + "refId": "A", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests (total)", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": 
true, + "values": [] + }, + "yaxes": [ + { + "format": "pps", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 0 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.2.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "yaxis": 2 + }, + { + "alias": "other", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(coredns_dns_request_type_count_total{instance=~\"$instance\"}[5m])) by (type) or \nsum(rate(coredns_dns_requests_total{instance=~\"$instance\"}[5m])) by (type)", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{"{{type}}"}}", + "refId": "A", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests (by qtype)", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "pps", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "logBase": 1, + "max": null, 
+ "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "hiddenSeries": false, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.2.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(coredns_dns_request_count_total{instance=~\"$instance\"}[5m])) by (zone) or\nsum(rate(coredns_dns_requests_total{instance=~\"$instance\"}[5m])) by (zone)", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{"{{zone}}"}}", + "refId": "A", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests (by zone)", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "pps", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": 
true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 7 + }, + "hiddenSeries": false, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.2.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "total", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(coredns_dns_request_do_count_total{instance=~\"$instance\"}[5m])) or\nsum(rate(coredns_dns_do_requests_total{instance=~\"$instance\"}[5m]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "DO", + "refId": "A", + "step": 40 + }, + { + "expr": "sum(rate(coredns_dns_request_count_total{instance=~\"$instance\"}[5m])) or\nsum(rate(coredns_dns_requests_total{instance=~\"$instance\"}[5m]))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "total", + "refId": "B", + "step": 40 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests (DO bit)", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "pps", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + 
"editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 6, + "x": 12, + "y": 7 + }, + "hiddenSeries": false, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.2.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "tcp:90", + "yaxis": 2 + }, + { + "alias": "tcp:99 ", + "yaxis": 2 + }, + { + "alias": "tcp:50", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(coredns_dns_request_size_bytes_bucket{instance=~\"$instance\",proto=\"udp\"}[5m])) by (le,proto))", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{"{{proto}}"}}:99 ", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.90, sum(rate(coredns_dns_request_size_bytes_bucket{instance=~\"$instance\",proto=\"udp\"}[5m])) by (le,proto))", + "intervalFactor": 2, + "legendFormat": "{{"{{proto}}"}}:90", + "refId": "B", + "step": 60 + }, + { + "expr": "histogram_quantile(0.50, sum(rate(coredns_dns_request_size_bytes_bucket{instance=~\"$instance\",proto=\"udp\"}[5m])) by (le,proto))", + "intervalFactor": 2, + "legendFormat": "{{"{{proto}}"}}:50", + "refId": "C", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests (size, udp)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": 
"bytes", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 6, + "x": 18, + "y": 7 + }, + "hiddenSeries": false, + "id": 12, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.2.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "tcp:90", + "yaxis": 1 + }, + { + "alias": "tcp:99 ", + "yaxis": 1 + }, + { + "alias": "tcp:50", + "yaxis": 1 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(coredns_dns_request_size_bytes_bucket{instance=~\"$instance\",proto=\"tcp\"}[5m])) by (le,proto))", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{"{{proto}}"}}:99 ", + "refId": "A", + "step": 60 + }, + { + "expr": "histogram_quantile(0.90, sum(rate(coredns_dns_request_size_bytes_bucket{instance=~\"$instance\",proto=\"tcp\"}[5m])) by (le,proto))", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{"{{proto}}"}}:90", + "refId": "B", + "step": 60 + }, + { + "expr": "histogram_quantile(0.50, sum(rate(coredns_dns_request_size_bytes_bucket{instance=~\"$instance\",proto=\"tcp\"}[5m])) by (le,proto))", + "format": "time_series", + "interval": 
"", + "intervalFactor": 2, + "legendFormat": "{{"{{proto}}"}}:50", + "refId": "C", + "step": 60 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests (size,tcp)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 14 + }, + "hiddenSeries": false, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.2.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(coredns_dns_response_rcode_count_total{instance=~\"$instance\"}[5m])) by (rcode) or\nsum(rate(coredns_dns_responses_total{instance=~\"$instance\"}[5m])) by (rcode)", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{"{{rcode}}"}}", + "refId": "A", + "step": 40 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Responses (by rcode)", + "tooltip": { + "shared": 
true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "pps", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 14 + }, + "hiddenSeries": false, + "id": 32, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.2.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(coredns_dns_request_duration_seconds_bucket{instance=~\"$instance\"}[5m])) by (le, job))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "99%", + "refId": "A", + "step": 40 + }, + { + "expr": "histogram_quantile(0.90, sum(rate(coredns_dns_request_duration_seconds_bucket{instance=~\"$instance\"}[5m])) by (le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "90%", + "refId": "B", + "step": 40 + }, + { + "expr": "histogram_quantile(0.50, sum(rate(coredns_dns_request_duration_seconds_bucket{instance=~\"$instance\"}[5m])) by (le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": 
"50%", + "refId": "C", + "step": 40 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Responses (duration)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 21 + }, + "hiddenSeries": false, + "id": 18, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.2.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "udp:50%", + "yaxis": 1 + }, + { + "alias": "tcp:50%", + "yaxis": 2 + }, + { + "alias": "tcp:90%", + "yaxis": 2 + }, + { + "alias": "tcp:99%", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(coredns_dns_response_size_bytes_bucket{instance=~\"$instance\",proto=\"udp\"}[5m])) by (le,proto)) ", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{"{{proto}}"}}:99%", + "refId": "A", + "step": 40 + }, + { + "expr": "histogram_quantile(0.90, 
sum(rate(coredns_dns_response_size_bytes_bucket{instance=~\"$instance\",proto=\"udp\"}[5m])) by (le,proto)) ", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{"{{proto}}"}}:90%", + "refId": "B", + "step": 40 + }, + { + "expr": "histogram_quantile(0.50, sum(rate(coredns_dns_response_size_bytes_bucket{instance=~\"$instance\",proto=\"udp\"}[5m])) by (le,proto)) ", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{"{{proto}}"}}:50%", + "metric": "", + "refId": "C", + "step": 40 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Responses (size, udp)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 21 + }, + "hiddenSeries": false, + "id": 20, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.2.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "udp:50%", + "yaxis": 1 + }, + { + "alias": "tcp:50%", + "yaxis": 1 + }, + { + "alias": "tcp:90%", + 
"yaxis": 1 + }, + { + "alias": "tcp:99%", + "yaxis": 1 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(coredns_dns_response_size_bytes_bucket{instance=~\"$instance\",proto=\"tcp\"}[5m])) by (le,proto)) ", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{"{{proto}}"}}:99%", + "refId": "A", + "step": 40 + }, + { + "expr": "histogram_quantile(0.90, sum(rate(coredns_dns_response_size_bytes_bucket{instance=~\"$instance\",proto=\"tcp\"}[5m])) by (le,proto)) ", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{"{{proto}}"}}:90%", + "refId": "B", + "step": 40 + }, + { + "expr": "histogram_quantile(0.50, sum(rate(coredns_dns_response_size_bytes_bucket{instance=~\"$instance\",proto=\"tcp\"}[5m])) by (le, proto)) ", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{"{{proto}}"}}:50%", + "metric": "", + "refId": "C", + "step": 40 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Responses (size, tcp)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 28 + }, + "hiddenSeries": false, + "id": 22, + "legend": { + "avg": false, + "current": 
false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.2.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(coredns_cache_size{instance=~\"$instance\"}) by (type) or\nsum(coredns_cache_entries{instance=~\"$instance\"}) by (type)", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{"{{type}}"}}", + "refId": "A", + "step": 40 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Cache (size)", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "decbytes", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 28 + }, + "hiddenSeries": false, + "id": 24, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.2.0", + "pointradius": 5, + "points": 
false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "misses", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(coredns_cache_hits_total{instance=~\"$instance\"}[5m])) by (type)", + "hide": false, + "intervalFactor": 2, + "legendFormat": "hits:{{"{{type}}"}}", + "refId": "A", + "step": 40 + }, + { + "expr": "sum(rate(coredns_cache_misses_total{instance=~\"$instance\"}[5m])) by (type)", + "hide": false, + "intervalFactor": 2, + "legendFormat": "misses", + "refId": "B", + "step": 40 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Cache (hitrate)", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "pps", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "10s", + "schemaVersion": 26, + "style": "dark", + "tags": [ + "dns", + "coredns" + ], + "templating": { + "list": [ + { + "current": { + "selected": true, + "text": "default", + "value": "default" + }, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "datasource", + "options": [], + "query": "prometheus", + "queryValue": "", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": ".*", + "current": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "datasource": "$datasource", + "definition": "label_values(up{job=\"coredns\"}, instance)", + "hide": 0, + "includeAll": true, + "label": "Instance", + "multi": false, + "name": "instance", + "options": [], + "query": "label_values(up{job=\"coredns\"}, instance)", + "refresh": 1, + 
"regex": "", + "skipUrlSync": false, + "sort": 3, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "utc", + "title": "CoreDNS", + "uid": "vkQ0UHxik", + "version": 2 + } +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-cluster.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-cluster.yaml new file mode 100755 index 000000000..9639fc15c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-cluster.yaml @@ -0,0 +1,2582 @@ +{{- /* +Generated from 'k8s-resources-cluster' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-cluster" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + k8s-resources-cluster.json: |- + { + "annotations": { + "list": [ + + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "height": "100px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 1, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 2, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 - avg(rate(node_cpu_seconds_total{mode=\"idle\", 
cluster=\"$cluster\"}[$__rate_interval]))", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilisation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "singlestat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 2, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable_cpu_cores{cluster=\"$cluster\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "CPU Requests Commitment", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "singlestat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + 
"label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 2, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable_cpu_cores{cluster=\"$cluster\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "CPU Limits Commitment", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "singlestat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + 
], + "spaceLength": 10, + "span": 2, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 - sum(:node_memory_MemAvailable_bytes:sum{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable_memory_bytes{cluster=\"$cluster\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "Memory Utilisation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "singlestat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 2, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable_memory_bytes{cluster=\"$cluster\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "Memory Requests Commitment", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "singlestat", + "xaxis": { + "buckets": null, + 
"mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 2, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\"}) / sum(kube_node_status_allocatable_memory_bytes{cluster=\"$cluster\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "Memory Limits Commitment", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "singlestat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Headlines", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": 
false, + "datasource": "$datasource", + "fill": 10, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 
10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Pods", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down to pods", + "linkUrl": "./d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell_1", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "Workloads", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down to workloads", + "linkUrl": "./d/a87fb0d919ec0ea5f6543124e16c42a5/k8s-resources-workloads-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell_1", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + 
"type": "number", + "unit": "percentunit" + }, + { + "alias": "CPU Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #G", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Namespace", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down to pods", + "linkUrl": "./d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell", + "pattern": "namespace", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(kube_pod_owner{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "count(avg(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\"}) by (workload, namespace)) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, 
+ "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Quota", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + 
"dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Usage (w/o cache)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + 
"spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Pods", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down to pods", + "linkUrl": "./d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell_1", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "Workloads", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down to workloads", + "linkUrl": "./d/a87fb0d919ec0ea5f6543124e16c42a5/k8s-resources-workloads-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell_1", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "Memory Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + 
"thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #G", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Namespace", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down to pods", + "linkUrl": "./d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell", + "pattern": "namespace", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(kube_pod_owner{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "count(avg(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\"}) by (workload, namespace)) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + 
"legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace) / sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", container!=\"\"}) by (namespace) / sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Requests by Namespace", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Requests", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": "$datasource", + "fill": 1, + "id": 11, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Current Receive Bandwidth", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Current Transmit Bandwidth", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Rate of Received Packets", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Transmitted Packets", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Received Packets Dropped", + "colorMode": null, + 
"colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Transmitted Packets Dropped", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Namespace", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down to pods", + "linkUrl": "./d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell", + "pattern": "namespace", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=~\".+\"}[$__rate_interval])) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=~\".+\"}[$__rate_interval])) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(irate(container_network_receive_packets_total{cluster=\"$cluster\", namespace=~\".+\"}[$__rate_interval])) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + 
"legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\", namespace=~\".+\"}[$__rate_interval])) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\", namespace=~\".+\"}[$__rate_interval])) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\", namespace=~\".+\"}[$__rate_interval])) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Network Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 12, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as 
zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=~\".+\"}[$__rate_interval])) by (namespace)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Receive Bandwidth", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 13, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=~\".+\"}[$__rate_interval])) by (namespace)", + "format": "time_series", + "intervalFactor": 
2, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Transmit Bandwidth", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "avg(irate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=~\".+\"}[$__rate_interval])) by (namespace)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Average Container Bandwidth by Namespace: Received", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": 
true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 15, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "avg(irate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=~\".+\"}[$__rate_interval])) by (namespace)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Average Container Bandwidth by Namespace: Transmitted", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + 
"title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 16, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_packets_total{cluster=\"$cluster\", namespace=~\".+\"}[$__rate_interval])) by (namespace)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 17, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + 
"lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\", namespace=~\".+\"}[$__rate_interval])) by (namespace)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 18, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\", 
namespace=~\".+\"}[$__rate_interval])) by (namespace)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets Dropped", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 19, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\", namespace=~\".+\"}[$__rate_interval])) by (namespace)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets Dropped", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": 
"individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "", + "value": "" + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": null, + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(node_cpu_seconds_total, cluster)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "UTC", + "title": "Kubernetes / Compute Resources / Cluster", + "uid": "efa86fd1d0c121a26444b636a3f509a8", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-namespace.yaml 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-namespace.yaml new file mode 100755 index 000000000..40355c6b8 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-namespace.yaml @@ -0,0 +1,2286 @@ +{{- /* +Generated from 'k8s-resources-namespace' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-namespace" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + k8s-resources-namespace.json: |- + { + "annotations": { + "list": [ + + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "height": "100px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 1, + 
"legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilisation (from requests)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "singlestat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) / sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilisation (from limits)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "singlestat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\", image!=\"\"}) / sum(kube_pod_container_resource_requests_memory_bytes{namespace=\"$namespace\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "Memory Utilization (from requests)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": 
"singlestat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\", image!=\"\"}) / sum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "Memory Utilisation (from limits)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "singlestat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Headlines", + "titleSize": "h6" + }, + { + "collapse": false, + 
"height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "quota - requests", + "color": "#F2495C", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + }, + { + "alias": "quota - limits", + "color": "#FF9830", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"requests.cpu\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - requests", + "legendLink": null, + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"limits.cpu\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - limits", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": 
null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "CPU Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", 
+ "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "CPU Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Pod", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", + "pattern": "pod", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": 
"sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod) / sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Quota", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + 
"total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "quota - requests", + "color": "#F2495C", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + }, + { + "alias": "quota - limits", + "color": "#FF9830", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\", image!=\"\"}) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"requests.memory\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - requests", + "legendLink": null, + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"limits.memory\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - limits", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Usage (w/o cache)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": 
false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Memory Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + 
"link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Usage (RSS)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Usage (Cache)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #G", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Usage (Swap)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #H", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Pod", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", + "pattern": "pod", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": 
"YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\", image!=\"\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\", image!=\"\"}) by (pod) / sum(kube_pod_container_resource_requests_memory_bytes{namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\", image!=\"\"}) by (pod) / sum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "expr": "sum(container_memory_cache{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) by (pod)", + "format": "table", + "instant": 
true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + }, + { + "expr": "sum(container_memory_swap{cluster=\"$cluster\", namespace=\"$namespace\",container!=\"\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "H", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Quota", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 9, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Current Receive Bandwidth", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": 
"Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Current Transmit Bandwidth", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Rate of Received Packets", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Transmitted Packets", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Received Packets Dropped", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Transmitted Packets Dropped", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Pod", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down to pods", + "linkUrl": 
"./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", + "pattern": "pod", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(irate(container_network_receive_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 
2, + "legendFormat": "", + "refId": "F", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Network Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Receive Bandwidth", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + 
"yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 11, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Transmit Bandwidth", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + 
"height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 12, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 13, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as 
zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 14, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "time_series", + 
"intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets Dropped", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 15, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets Dropped", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + 
"name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "", + "value": "" + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": null, + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kube_pod_info, cluster)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "", + "value": "" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "namespace", + "options": [ + + ], + "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + 
"timezone": "UTC", + "title": "Kubernetes / Compute Resources / Namespace (Pods)", + "uid": "85a562078cdf77779eaa1add43ccec1e", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-node.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-node.yaml new file mode 100755 index 000000000..ce6628ae5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-node.yaml @@ -0,0 +1,978 @@ +{{- /* +Generated from 'k8s-resources-node' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-node" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + k8s-resources-node.json: |- + { + "annotations": { + "list": [ + + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, 
+ "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", node=~\"$node\"}) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + 
"linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "CPU Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "CPU Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Pod", + 
"colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "pod", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", node=~\"$node\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", node=~\"$node\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", node=~\"$node\"}) by (pod) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", node=~\"$node\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", node=~\"$node\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", node=~\"$node\"}) by (pod) / sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", node=~\"$node\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": 
null, + "timeShift": null, + "title": "CPU Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Quota", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_memory_working_set_bytes{cluster=\"$cluster\", node=~\"$node\", container!=\"\"}) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Usage (w/o cache)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + 
"show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Memory Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": 
"Memory Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Usage (RSS)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Usage (Cache)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #G", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Usage (Swap)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #H", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Pod", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "pod", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + 
"decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_memory_working_set_bytes{cluster=\"$cluster\", node=~\"$node\",container!=\"\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", node=~\"$node\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_memory_working_set_bytes{cluster=\"$cluster\", node=~\"$node\",container!=\"\"}) by (pod) / sum(kube_pod_container_resource_requests_memory_bytes{node=~\"$node\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", node=~\"$node\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_memory_working_set_bytes{cluster=\"$cluster\", node=~\"$node\",container!=\"\"}) by (pod) / sum(kube_pod_container_resource_limits_memory_bytes{node=~\"$node\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_memory_rss{cluster=\"$cluster\", node=~\"$node\",container!=\"\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_memory_cache{cluster=\"$cluster\", node=~\"$node\",container!=\"\"}) by (pod)", + "format": "table", + "instant": 
true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_memory_swap{cluster=\"$cluster\", node=~\"$node\",container!=\"\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "H", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Quota", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "", + "value": "" + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": null, + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kube_pod_info, cluster)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "", + "value": "" + }, + "datasource": "$datasource", + "hide": 0, + 
"includeAll": false, + "label": null, + "multi": true, + "name": "node", + "options": [ + + ], + "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, node)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "UTC", + "title": "Kubernetes / Compute Resources / Node (Pods)", + "uid": "200ac8fdbfbb74b39aff88118e4d1c2c", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-pod.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-pod.yaml new file mode 100755 index 000000000..6badcf173 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-pod.yaml @@ -0,0 +1,1772 @@ +{{- /* +Generated from 'k8s-resources-pod' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-pod" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + k8s-resources-pod.json: |- + { + "annotations": { + "list": [ + + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "requests", + "color": "#F2495C", + "fill": 0, + "hideTooltip": true, + "legend": true, + "linewidth": 2, + "stack": false + }, + { + "alias": "limits", + "color": "#FF9830", + "fill": 0, + "hideTooltip": true, + 
"legend": true, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", cluster=\"$cluster\"}) by (container)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}container{{`}}`}}", + "legendLink": null, + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"})\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "requests", + "legendLink": null, + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"})\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "limits", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 2, + "legend": { + "avg": false, + "current": true, + "max": true, + "min": false, + "show": true, + "total": false, + "values": false 
+ }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(increase(container_cpu_cfs_throttled_periods_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container!=\"\", cluster=\"$cluster\"}[5m])) by (container) /sum(increase(container_cpu_cfs_periods_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container!=\"\", cluster=\"$cluster\"}[5m])) by (container)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}container{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 0.25, + "yaxis": "left" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Throttling", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Throttling", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + 
"nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "CPU Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "CPU Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Container", + "colorMode": null, + "colors": [ + + ], 
+ "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "container", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container) / sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", 
pod=\"$pod\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Quota", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "requests", + "color": "#F2495C", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + }, + { + "alias": "limits", + "color": "#FF9830", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container!=\"\", image!=\"\"}) by (container)", + 
"format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}container{{`}}`}}", + "legendLink": null, + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"})\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "requests", + "legendLink": null, + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"})\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "limits", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + 
"dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Memory Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Usage (RSS)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + 
}, + { + "alias": "Memory Usage (Cache)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #G", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Usage (Swap)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #H", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Container", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "container", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"POD\", container!=\"\", image!=\"\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", image!=\"\"}) by (container) / sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", 
namespace=\"$namespace\", pod=\"$pod\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"\", image!=\"\"}) by (container) / sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container != \"\", container != \"POD\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "expr": "sum(container_memory_cache{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container != \"\", container != \"POD\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + }, + { + "expr": "sum(container_memory_swap{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container != \"\", container != \"POD\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "H", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": 
null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Quota", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 6, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_bytes_total{namespace=~\"$namespace\", pod=~\"$pod\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Receive Bandwidth", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": 
"Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 7, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_bytes_total{namespace=~\"$namespace\", pod=~\"$pod\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Transmit Bandwidth", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 8, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false 
+ }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_packets_total{namespace=~\"$namespace\", pod=~\"$pod\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 9, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": 
"sum(irate(container_network_transmit_packets_total{namespace=~\"$namespace\", pod=~\"$pod\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 10, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_packets_dropped_total{namespace=~\"$namespace\", pod=~\"$pod\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets Dropped", 
+ "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 11, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_packets_dropped_total{namespace=~\"$namespace\", pod=~\"$pod\"}[$__rate_interval])) by (pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets Dropped", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, 
+ "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "", + "value": "" + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": null, + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kube_pod_info, cluster)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "", + "value": "" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "namespace", + "options": [ + + ], + "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "", + "value": "" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "pod", + "options": [ + + ], + "query": "label_values(kube_pod_info{cluster=\"$cluster\", namespace=\"$namespace\"}, pod)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": 
"now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "UTC", + "title": "Kubernetes / Compute Resources / Pod", + "uid": "6581e46e4e5c7ba40a07646395ef7b23", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-workload.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-workload.yaml new file mode 100755 index 000000000..931934f23 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-workload.yaml @@ -0,0 +1,2034 @@ +{{- /* +Generated from 'k8s-resources-workload' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-workload" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + k8s-resources-workload.json: |- + { + "annotations": { + "list": [ + + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", 
namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "CPU Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + 
"linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "CPU Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Pod", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", + "pattern": "pod", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": 
"string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) 
namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Quota", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 3, + "legend": { + 
"avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\", image!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + 
"nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Memory Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Pod", + "colorMode": null, + "colors": 
[ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", + "pattern": "pod", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\", image!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\", image!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", 
namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\", image!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + 
"logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Quota", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 5, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Current Receive Bandwidth", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Current Transmit Bandwidth", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Rate of Received Packets", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + 
"thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Transmitted Packets", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Received Packets Dropped", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Transmitted Packets Dropped", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Pod", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", + "pattern": "pod", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "(sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", 
namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "(sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "(sum(irate(container_network_receive_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "(sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "(sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": 
"table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "(sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Network Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": 
"(sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Receive Bandwidth", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) 
namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Transmit Bandwidth", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(avg(irate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "time_series", + 
"intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Average Container Bandwidth by Pod: Received", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(avg(irate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, 
+ "title": "Average Container Bandwidth by Pod: Transmitted", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(irate(container_network_receive_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": 
null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 11, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": 
true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 12, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets Dropped", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": 
null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 13, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets Dropped", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + 
"kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "", + "value": "" + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": null, + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kube_pod_info, cluster)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "", + "value": "" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "namespace", + "options": [ + + ], + "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "", + "value": "" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "workload", + "options": [ + + ], + "query": "label_values(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\"}, workload)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "", + "value": "" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "type", + "options": [ + + ], + "query": 
"label_values(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\"}, workload_type)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "UTC", + "title": "Kubernetes / Compute Resources / Workload", + "uid": "a164a7f0339f99e89cea5cb47e9be617", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-workloads-namespace.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-workloads-namespace.yaml new file mode 100755 index 000000000..dd3b25065 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/k8s-resources-workloads-namespace.yaml @@ -0,0 +1,2195 @@ +{{- /* +Generated from 'k8s-resources-workloads-namespace' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-workloads-namespace" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + k8s-resources-workloads-namespace.json: |- + { + "annotations": { + "list": [ + + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "quota - requests", + "color": "#F2495C", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + }, + { + "alias": "quota - 
limits", + "color": "#FF9830", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}workload{{`}}`}} - {{`{{`}}workload_type{{`}}`}}", + "legendLink": null, + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"requests.cpu\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - requests", + "legendLink": null, + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"limits.cpu\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - limits", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": 
"250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Running Pods", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "CPU Limits", 
+ "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Workload", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "./d/a164a7f0339f99e89cea5cb47e9be617/k8s-resources-workload?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-workload=$__cell&var-type=$__cell_2", + "pattern": "workload", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "Workload Type", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "workload_type", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "count(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}) by (workload, workload_type)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(\n 
node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by 
(workload, workload_type)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(\n node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Quota", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + 
"linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "quota - requests", + "color": "#F2495C", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + }, + { + "alias": "quota - limits", + "color": "#FF9830", + "dashes": true, + "fill": 0, + "hideTooltip": true, + "legend": false, + "linewidth": 2, + "stack": false + } + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\", image!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}workload{{`}}`}} - {{`{{`}}workload_type{{`}}`}}", + "legendLink": null, + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"requests.memory\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - requests", + "legendLink": null, + "step": 10 + }, + { + "expr": "scalar(kube_resourcequota{cluster=\"$cluster\", namespace=\"$namespace\", type=\"hard\",resource=\"limits.memory\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "quota - limits", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + 
"label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Running Pods", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "Memory Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": 
"number", + "unit": "bytes" + }, + { + "alias": "Memory Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Workload", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "./d/a164a7f0339f99e89cea5cb47e9be617/k8s-resources-workload?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-workload=$__cell&var-type=$__cell_2", + "pattern": "workload", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "Workload Type", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "workload_type", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": 
"short" + } + ], + "targets": [ + { + "expr": "count(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}) by (workload, workload_type)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\", image!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\", image!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + 
"refId": "D", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(\n container_memory_working_set_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container!=\"\", image!=\"\"}\n * on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n* on(namespace,pod)\n group_left(workload, workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=\"$namespace\", workload_type=\"$type\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Quota", + "titleSize": "h6" + }, + { + "collapse": false, 
+ "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 5, + "interval": "1m", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Current Receive Bandwidth", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Current Transmit Bandwidth", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Rate of Received Packets", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Transmitted Packets", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + 
], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Received Packets Dropped", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Transmitted Packets Dropped", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Workload", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTargetBlank": false, + "linkTooltip": "Drill down to pods", + "linkUrl": "./d/a164a7f0339f99e89cea5cb47e9be617/k8s-resources-workload?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-workload=$__cell&var-type=$type", + "pattern": "workload", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "Workload Type", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "workload_type", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "(sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) 
namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload_type=\"$type\"}) by (workload))\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "(sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload_type=\"$type\"}) by (workload))\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "(sum(irate(container_network_receive_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload_type=\"$type\"}) by (workload))\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "(sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload_type=\"$type\"}) by (workload))\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "(sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload_type=\"$type\"}) by (workload))\n", + "format": "table", + "instant": true, + 
"intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "(sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload_type=\"$type\"}) by (workload))\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Network Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\", 
namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}workload{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Receive Bandwidth", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", 
namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}workload{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Transmit Bandwidth", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(avg(irate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}workload{{`}}`}}", + 
"legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Average Container Bandwidth by Workload: Received", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(avg(irate(container_network_transmit_bytes_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}workload{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Average Container Bandwidth by Workload: 
Transmitted", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(irate(container_network_receive_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}workload{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + 
"show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 11, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}workload{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + 
"label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 12, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}workload{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets Dropped", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + 
"showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 13, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\", namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}workload{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets Dropped", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { 
+ "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "deployment", + "value": "deployment" + }, + "datasource": "$datasource", + "definition": "label_values(namespace_workload_pod:kube_pod_owner:relabel{namespace=~\"$namespace\", workload=~\".+\"}, workload_type)", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "type", + "options": [ + + ], + "query": "label_values(namespace_workload_pod:kube_pod_owner:relabel{namespace=~\"$namespace\", workload=~\".+\"}, workload_type)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "", + "value": "" + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": null, + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kube_pod_info, cluster)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "", + "value": "" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "namespace", + "options": [ + + ], + "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "refresh": 1, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + 
"refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "UTC", + "title": "Kubernetes / Compute Resources / Namespace (Workloads)", + "uid": "a87fb0d919ec0ea5f6543124e16c42a5", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/kubelet.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/kubelet.yaml new file mode 100755 index 000000000..f72ff5875 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/kubelet.yaml @@ -0,0 +1,2533 @@ +{{- /* +Generated from 'kubelet' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.kubelet.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "kubelet" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include 
"kube-prometheus-stack.labels" $ | indent 4 }} +data: + kubelet.json: |- + { + "__inputs": [ + + ], + "__requires": [ + + ], + "annotations": { + "list": [ + + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 2, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(up{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Up", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "min" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 
100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 3, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(kubelet_running_pods{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"}) OR sum(kubelet_running_pod_count{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": "", + "title": "Running Pods", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "min" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 4, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + 
"postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(kubelet_running_containers{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"}) OR sum(kubelet_running_container_count{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": "", + "title": "Running Container", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "min" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 5, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": 
"sum(volume_manager_total_volumes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\", state=\"actual_state_of_world\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": "", + "title": "Actual Volume Count", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "min" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 6, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(volume_manager_total_volumes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\",state=\"desired_state_of_world\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": "", + "title": "Desired Volume Count", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": 
"null" + } + ], + "valueName": "min" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 7, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(rate(kubelet_node_config_error{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": "", + "title": "Config Error Count", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "min" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 8, + "legend": { + "alignAsTable": true, + "avg": false, + 
"current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(kubelet_runtime_operations_total{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (operation_type, instance)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}operation_type{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Operation Rate", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 9, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, 
+ "targets": [ + { + "expr": "sum(rate(kubelet_runtime_operations_errors_total{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, operation_type)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}operation_type{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Operation Error Rate", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 10, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(kubelet_runtime_operations_duration_seconds_bucket{cluster=\"$cluster\",job=\"kubelet\", 
metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, operation_type, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}operation_type{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Operation duration 99th quantile", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 11, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(kubelet_pod_start_duration_seconds_count{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} pod", + "refId": 
"A" + }, + { + "expr": "sum(rate(kubelet_pod_worker_duration_seconds_count{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} worker", + "refId": "B" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Pod Start Rate", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 12, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(kubelet_pod_start_duration_seconds_count{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} pod", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(kubelet_pod_worker_duration_seconds_bucket{cluster=\"$cluster\",job=\"kubelet\", 
metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} worker", + "refId": "B" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Pod Start Duration", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 13, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(storage_operation_duration_seconds_count{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, operation_name, volume_plugin)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} 
{{`{{`}}operation_name{{`}}`}} {{`{{`}}volume_plugin{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Storage Operation Rate", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 14, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(storage_operation_errors_total{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, operation_name, volume_plugin)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}operation_name{{`}}`}} {{`{{`}}volume_plugin{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Storage Operation Error Rate", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + 
"name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 15, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(storage_operation_duration_seconds_bucket{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"}[5m])) by (instance, operation_name, volume_plugin, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}operation_name{{`}}`}} {{`{{`}}volume_plugin{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Storage Operation Duration 99th quantile", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + 
"yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 16, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(kubelet_cgroup_manager_duration_seconds_count{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"}[5m])) by (instance, operation_type)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}operation_type{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Cgroup manager operation rate", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } 
+ ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 17, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(kubelet_cgroup_manager_duration_seconds_bucket{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"}[5m])) by (instance, operation_type, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}operation_type{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Cgroup manager 99th quantile", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Pod lifecycle 
event generator", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 18, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(kubelet_pleg_relist_duration_seconds_count{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"}[5m])) by (instance)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "PLEG relist rate", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 19, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + 
"seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_interval_seconds_bucket{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "PLEG relist interval", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 20, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, 
sum(rate(kubelet_pleg_relist_duration_seconds_bucket{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])) by (instance, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "PLEG relist duration", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 21, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\",code=~\"2..\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "2xx", + "refId": "A" + 
}, + { + "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\",code=~\"3..\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "3xx", + "refId": "B" + }, + { + "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\",code=~\"4..\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "4xx", + "refId": "C" + }, + { + "expr": "sum(rate(rest_client_requests_total{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\",code=~\"5..\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "5xx", + "refId": "D" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "RPC Rate", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 22, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", 
+ "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\", instance=~\"$instance\"}[5m])) by (instance, verb, url, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}verb{{`}}`}} {{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Request duration 99th quantile", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 23, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, 
+ "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "process_resident_memory_bytes{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 24, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(process_cpu_seconds_total{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}[5m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": 
"graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 25, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_goroutines{cluster=\"$cluster\",job=\"kubelet\", metrics_path=\"/metrics\",instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Goroutines", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + } + ], + 
"schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kube_pod_info, cluster)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "instance", + "options": [ + + ], + "query": "label_values(kubelet_runtime_operations_total{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\"}, instance)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "UTC", + "title": "Kubernetes / Kubelet", + "uid": "3138fa155d5915769fbded898ac09fd9", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/namespace-by-pod.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/namespace-by-pod.yaml new file mode 100755 index 
000000000..20def0a8d --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/namespace-by-pod.yaml @@ -0,0 +1,1464 @@ +{{- /* +Generated from 'namespace-by-pod' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "namespace-by-pod" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + namespace-by-pod.json: |- + { + "__inputs": [ + + ], + "__requires": [ + + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ + + ], + "panels": [ + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [ + + ], + "repeat": null, + "repeatIteration": 
null, + "repeatRowId": null, + "showTitle": true, + "title": "Current Bandwidth", + "titleSize": "h6", + "type": "row" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 0, + "format": "time_series", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 1 + }, + "height": 9, + "id": 3, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "minSpan": 12, + "nullPointMode": "connected", + "nullText": null, + "options": { + "fieldOptions": { + "calcs": [ + "last" + ], + "defaults": { + "max": 10000000000, + "min": 0, + "title": "$namespace", + "unit": "Bps" + }, + "mappings": [ + + ], + "override": { + + }, + "thresholds": [ + { + "color": "dark-green", + "index": 0, + "value": null + }, + { + "color": "dark-yellow", + "index": 1, + "value": 5000000000 + }, + { + "color": "dark-red", + "index": 2, + "value": 7000000000 + } + ], + "values": false + } + }, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 12, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution]))", + "format": "time_series", + "instant": null, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Current Rate of Bytes 
Received", + "type": "gauge", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 0, + "format": "time_series", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 1 + }, + "height": 9, + "id": 4, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "minSpan": 12, + "nullPointMode": "connected", + "nullText": null, + "options": { + "fieldOptions": { + "calcs": [ + "last" + ], + "defaults": { + "max": 10000000000, + "min": 0, + "title": "$namespace", + "unit": "Bps" + }, + "mappings": [ + + ], + "override": { + + }, + "thresholds": [ + { + "color": "dark-green", + "index": 0, + "value": null + }, + { + "color": "dark-yellow", + "index": 1, + "value": 5000000000 + }, + { + "color": "dark-red", + "index": 2, + "value": 7000000000 + } + ], + "values": false + } + }, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 12, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution]))", + "format": "time_series", + "instant": null, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": 
null, + "title": "Current Rate of Bytes Transmitted", + "type": "gauge", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "columns": [ + { + "text": "Time", + "value": "Time" + }, + { + "text": "Value #A", + "value": "Value #A" + }, + { + "text": "Value #B", + "value": "Value #B" + }, + { + "text": "Value #C", + "value": "Value #C" + }, + { + "text": "Value #D", + "value": "Value #D" + }, + { + "text": "Value #E", + "value": "Value #E" + }, + { + "text": "Value #F", + "value": "Value #F" + }, + { + "text": "pod", + "value": "pod" + } + ], + "datasource": "$datasource", + "fill": 1, + "fontSize": "100%", + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 10 + }, + "id": 5, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "null as zero", + "renderer": "flot", + "scroll": true, + "showHeader": true, + "sort": { + "col": 0, + "desc": false + }, + "spaceLength": 10, + "span": 24, + "styles": [ + { + "alias": "Time", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Time", + "thresholds": [ + + ], + "type": "hidden", + "unit": "short" + }, + { + "alias": "Bandwidth Received", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Bandwidth Transmitted", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Rate of Received Packets", + "colorMode": null, + "colors": [ + + ], + "dateFormat": 
"YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Transmitted Packets", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Received Packets Dropped", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Transmitted Packets Dropped", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Pod", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTooltip": "Drill down", + "linkUrl": "d/7a18067ce943a40ae25454675c19ff5c/kubernetes-networking-pod?orgId=1&refresh=30s&var-namespace=$namespace&var-pod=$__cell", + "pattern": "pod", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + 
"legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(irate(container_network_receive_packets_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Status", + "type": "table" + }, + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 19 + }, + "id": 6, + "panels": [ + + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Bandwidth", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 20 + }, + "id": 7, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": 
false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])) by (pod)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Receive Bandwidth", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 20 + }, + "id": 8, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": 
"sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])) by (pod)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Transmit Bandwidth", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "collapse": true, + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 29 + }, + "id": 9, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 30 + }, + "id": 10, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_packets_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])) by (pod)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "refId": 
"A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 30 + }, + "id": 11, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])) by (pod)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + 
"format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Packets", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": true, + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 30 + }, + "id": 12, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 40 + }, + "id": 13, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])) by (pod)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets Dropped", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": 
null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 40 + }, + "id": 14, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])) by (pod)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets Dropped", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Errors", + "titleSize": "h6", + "type": "row" + } + ], + "refresh": "10s", + "rows": [ + 
+ ], + "schemaVersion": 18, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kube_pod_info, cluster)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "kube-system", + "value": "kube-system" + }, + "datasource": "$datasource", + "definition": "label_values(container_network_receive_packets_total{cluster=\"$cluster\"}, namespace)", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "namespace", + "options": [ + + ], + "query": "label_values(container_network_receive_packets_total{cluster=\"$cluster\"}, namespace)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "5m", + "value": "5m" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "resolution", + "options": [ + { + "selected": false, + "text": "30s", + "value": "30s" + }, + { + "selected": true, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + } + ], + "query": "30s,5m,1h", + "refresh": 2, + 
"regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "interval", + "useTags": false + }, + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "5m", + "value": "5m" + }, + "datasource": "$datasource", + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "interval", + "options": [ + { + "selected": true, + "text": "4h", + "value": "4h" + } + ], + "query": "4h", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "interval", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "UTC", + "title": "Kubernetes / Networking / Namespace (Pods)", + "uid": "8b7a8b326d7a6f1f04244066368c67af", + "version": 0 + } +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/namespace-by-workload.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/namespace-by-workload.yaml new file mode 100755 index 000000000..adecffa09 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/namespace-by-workload.yaml @@ -0,0 +1,1736 @@ +{{- /* +Generated from 'namespace-by-workload' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "namespace-by-workload" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + namespace-by-workload.json: |- + { + "__inputs": [ + + ], + "__requires": [ + + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ + + ], + "panels": [ + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [ + + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Current Bandwidth", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": { + + }, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 3, + 
"legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}} workload {{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Rate of Bytes Received", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "series", + "name": null, + "show": false, + "values": [ + "current" + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 4, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + 
"hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}} workload {{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Rate of Bytes Transmitted", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "series", + "name": null, + "show": false, + "values": [ + "current" + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "columns": [ + { + "text": "Time", + "value": "Time" + }, + { + "text": "Value #A", + "value": "Value #A" + }, + { + "text": "Value #B", + "value": "Value #B" + }, + { + "text": "Value #C", + "value": "Value #C" + }, + { + "text": "Value #D", + "value": "Value #D" + }, + { + "text": "Value #E", + "value": "Value #E" + }, + { + "text": "Value #F", + "value": "Value #F" + }, + { + "text": "Value #G", + 
"value": "Value #G" + }, + { + "text": "Value #H", + "value": "Value #H" + }, + { + "text": "workload", + "value": "workload" + } + ], + "datasource": "$datasource", + "fill": 1, + "fontSize": "90%", + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 10 + }, + "id": 5, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "null as zero", + "renderer": "flot", + "scroll": true, + "showHeader": true, + "sort": { + "col": 0, + "desc": false + }, + "spaceLength": 10, + "span": 24, + "styles": [ + { + "alias": "Time", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Time", + "thresholds": [ + + ], + "type": "hidden", + "unit": "short" + }, + { + "alias": "Current Bandwidth Received", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Current Bandwidth Transmitted", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Average Bandwidth Received", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "Bps" + }, + { + "alias": "Average Bandwidth Transmitted", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + 
"unit": "Bps" + }, + { + "alias": "Rate of Received Packets", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Transmitted Packets", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Received Packets Dropped", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #G", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Rate of Transmitted Packets Dropped", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #H", + "thresholds": [ + + ], + "type": "number", + "unit": "pps" + }, + { + "alias": "Workload", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTooltip": "Drill down", + "linkUrl": "d/728bf77cc1166d2f3133bf25846876cc/kubernetes-networking-workload?orgId=1&refresh=30s&var-namespace=$namespace&var-type=$type&var-workload=$__cell", + "pattern": "workload", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\", 
workload_type=\"$type\"}) by (workload))\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sort_desc(sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sort_desc(avg(irate(container_network_receive_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sort_desc(avg(irate(container_network_transmit_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sort_desc(sum(irate(container_network_receive_packets_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "table", + "instant": 
true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sort_desc(sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "expr": "sort_desc(sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + }, + { + "expr": "sort_desc(sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "H", + "step": 10 + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Status", + "type": "table" + }, + { + "collapse": true, + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 19 + }, + "id": 6, + "panels": [ + { + "aliasColors": { + + }, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 20 + }, + "id": 7, + 
"legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(avg(irate(container_network_receive_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}} workload {{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Average Rate of Bytes Received", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "series", + "name": null, + "show": false, + "values": [ + "current" + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 20 + }, + "id": 8, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + 
"hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(avg(irate(container_network_transmit_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}} workload {{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Average Rate of Bytes Transmitted", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "series", + "name": null, + "show": false, + "values": [ + "current" + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Average Bandwidth", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 29 + }, + "id": 9, + "panels": [ + + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": 
"Bandwidth HIstory", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 38 + }, + "id": 10, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}workload{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Receive Bandwidth", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 38 + }, + "id": 11, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}workload{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Transmit Bandwidth", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "collapse": true, + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 39 + }, + "id": 12, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": 
"$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 40 + }, + "id": 13, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_receive_packets_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}workload{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 40 + }, + "id": 14, + "legend": { + 
"alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}workload{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Packets", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": true, + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 40 + }, + "id": 15, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 41 + }, + "id": 16, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}workload{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets Dropped", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 41 + }, + 
"id": 17, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\", workload_type=\"$type\"}) by (workload))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}workload{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets Dropped", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Errors", + "titleSize": "h6", + "type": "row" + } + ], + "refresh": "10s", + "rows": [ + + ], + "schemaVersion": 18, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": 
"default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kube_pod_info, cluster)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "kube-system", + "value": "kube-system" + }, + "datasource": "$datasource", + "definition": "label_values(container_network_receive_packets_total{cluster=\"$cluster\"}, namespace)", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "namespace", + "options": [ + + ], + "query": "label_values(container_network_receive_packets_total{cluster=\"$cluster\"}, namespace)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "deployment", + "value": "deployment" + }, + "datasource": "$datasource", + "definition": "label_values(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\"}, workload_type)", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "type", + "options": [ + + ], + "query": "label_values(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\".+\"}, workload_type)", + "refresh": 1, + "regex": "", + 
"skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "5m", + "value": "5m" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "resolution", + "options": [ + { + "selected": false, + "text": "30s", + "value": "30s" + }, + { + "selected": true, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + } + ], + "query": "30s,5m,1h", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "interval", + "useTags": false + }, + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "5m", + "value": "5m" + }, + "datasource": "$datasource", + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "interval", + "options": [ + { + "selected": true, + "text": "4h", + "value": "4h" + } + ], + "query": "4h", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "interval", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "UTC", + "title": "Kubernetes / Networking / Namespace (Workload)", + "uid": "bbb2a765a623ae38130206c7d94a160f", + "version": 0 + } +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/node-cluster-rsrc-use.yaml 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/node-cluster-rsrc-use.yaml new file mode 100755 index 000000000..7ef72f97a --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/node-cluster-rsrc-use.yaml @@ -0,0 +1,964 @@ +{{- /* +Generated from 'node-cluster-rsrc-use' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.nodeExporter.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "node-cluster-rsrc-use" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + node-cluster-rsrc-use.json: |- + { + "annotations": { + "list": [ + + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 1, + "legend": { 
+ "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(\n instance:node_cpu_utilisation:rate1m{job=\"node-exporter\"}\n*\n instance:node_num_cpu:sum{job=\"node-exporter\"}\n)\n/ scalar(sum(instance:node_num_cpu:sum{job=\"node-exporter\"}))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilisation", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "instance:node_load1_per_cpu:ratio{job=\"node-exporter\"}\n/ 
scalar(count(instance:node_load1_per_cpu:ratio{job=\"node-exporter\"}))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Saturation (load1 per CPU)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "instance:node_memory_utilisation:ratio{job=\"node-exporter\"}\n/ scalar(count(instance:node_memory_utilisation:ratio{job=\"node-exporter\"}))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory 
Utilisation", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "instance:node_vmstat_pgmajfault:rate1m{job=\"node-exporter\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Saturation (Major Page Faults)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "rps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory", + "titleSize": "h6" + }, + { + "collapse": 
false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/ Receive/", + "stack": "A" + }, + { + "alias": "/ Transmit/", + "stack": "B", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "instance:node_network_receive_bytes_excluding_lo:rate1m{job=\"node-exporter\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} Receive", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "step": 10 + }, + { + "expr": "instance:node_network_transmit_bytes_excluding_lo:rate1m{job=\"node-exporter\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} Transmit", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Net Utilisation (Bytes Receive/Transmit)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 6, + 
"legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/ Receive/", + "stack": "A" + }, + { + "alias": "/ Transmit/", + "stack": "B", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "instance:node_network_receive_drop_excluding_lo:rate1m{job=\"node-exporter\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} Receive", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "step": 10 + }, + { + "expr": "instance:node_network_transmit_drop_excluding_lo:rate1m{job=\"node-exporter\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} Transmit", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Net Saturation (Drops Receive/Transmit)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "rps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + 
"id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "instance_device:node_disk_io_time_seconds:rate1m{job=\"node-exporter\"}\n/ scalar(count(instance_device:node_disk_io_time_seconds:rate1m{job=\"node-exporter\"}))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}device{{`}}`}}", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk IO Utilisation", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": 
"instance_device:node_disk_io_time_weighted_seconds:rate1m{job=\"node-exporter\"}\n/ scalar(count(instance_device:node_disk_io_time_weighted_seconds:rate1m{job=\"node-exporter\"}))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} {{`{{`}}device{{`}}`}}", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk IO Saturation", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Disk IO", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum without (device) (\n max without (fstype, mountpoint) (\n node_filesystem_size_bytes{job=\"node-exporter\", fstype!=\"\"} - node_filesystem_avail_bytes{job=\"node-exporter\", fstype!=\"\"}\n )\n) \n/ scalar(sum(max without (fstype, mountpoint) 
(node_filesystem_size_bytes{job=\"node-exporter\", fstype!=\"\"})))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "legendLink": "/dashboard/file/node-rsrc-use.json", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk Space Utilisation", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Disk Space", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "utc", + "title": "USE Method / Cluster", + "uid": "", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/node-rsrc-use.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/node-rsrc-use.yaml new file mode 100755 index 000000000..9defce091 --- /dev/null +++ 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/node-rsrc-use.yaml @@ -0,0 +1,991 @@ +{{- /* +Generated from 'node-rsrc-use' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.nodeExporter.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "node-rsrc-use" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + node-rsrc-use.json: |- + { + "annotations": { + "list": [ + + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", 
+ "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "instance:node_cpu_utilisation:rate1m{job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Utilisation", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilisation", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "instance:node_load1_per_cpu:ratio{job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Saturation", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Saturation (Load1 per CPU)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { 
+ "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "instance:node_memory_utilisation:ratio{job=\"node-exporter\", job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Memory", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Utilisation", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": "$datasource", + "fill": 1, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "instance:node_vmstat_pgmajfault:rate1m{job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Major page faults", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Saturation (Major Page Faults)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/Receive/", + "stack": 
"A" + }, + { + "alias": "/Transmit/", + "stack": "B", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "instance:node_network_receive_bytes_excluding_lo:rate1m{job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Receive", + "legendLink": null, + "step": 10 + }, + { + "expr": "instance:node_network_transmit_bytes_excluding_lo:rate1m{job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Transmit", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Net Utilisation (Bytes Receive/Transmit)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/Receive/", + "stack": "A" + }, + { + "alias": "/Transmit/", + "stack": "B", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"instance:node_network_receive_drop_excluding_lo:rate1m{job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Receive drops", + "legendLink": null, + "step": 10 + }, + { + "expr": "instance:node_network_transmit_drop_excluding_lo:rate1m{job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Transmit drops", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Net Saturation (Drops Receive/Transmit)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "rps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Net", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "instance_device:node_disk_io_time_seconds:rate1m{job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": 
"{{`{{`}}device{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk IO Utilisation", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "instance_device:node_disk_io_time_weighted_seconds:rate1m{job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}device{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk IO Saturation", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + 
], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Disk IO", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 -\n(\n max without (mountpoint, fstype) (node_filesystem_avail_bytes{job=\"node-exporter\", fstype!=\"\", instance=\"$instance\"})\n/\n max without (mountpoint, fstype) (node_filesystem_size_bytes{job=\"node-exporter\", fstype!=\"\", instance=\"$instance\"})\n)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}device{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk Space Utilisation", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Disk Space", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + + ], + "templating": { + "list": [ + { + 
"current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "instance", + "multi": false, + "name": "instance", + "options": [ + + ], + "query": "label_values(up{job=\"node-exporter\"}, instance)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "utc", + "title": "USE Method / Node", + "uid": "", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/nodes.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/nodes.yaml new file mode 100755 index 000000000..8c67344c4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/nodes.yaml @@ -0,0 +1,997 @@ +{{- /* +Generated from 'nodes' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "nodes" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + nodes.json: |- + { + "__inputs": [ + + ], + "__requires": [ + + ], + "annotations": { + "list": [ + + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ + + ], + "refresh": "", + "rows": [ + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 2, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": 
false, + "targets": [ + { + "expr": "(\n (1 - rate(node_cpu_seconds_total{job=\"node-exporter\", mode=\"idle\", instance=\"$instance\"}[$__interval]))\n/ ignoring(cpu) group_left\n count without (cpu)( node_cpu_seconds_total{job=\"node-exporter\", mode=\"idle\", instance=\"$instance\"})\n)\n", + "format": "time_series", + "interval": "1m", + "intervalFactor": 5, + "legendFormat": "{{`{{`}}cpu{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + }, + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 0, + "fillGradient": 0, + "gridPos": { + + }, + "id": 3, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "node_load1{job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "1m load average", + "refId": "A" + }, + { + "expr": "node_load5{job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "5m load 
average", + "refId": "B" + }, + { + "expr": "node_load15{job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "15m load average", + "refId": "C" + }, + { + "expr": "count(node_cpu_seconds_total{job=\"node-exporter\", instance=\"$instance\", mode=\"idle\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "logical cores", + "refId": "D" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Load Average", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 9, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(\n node_memory_MemTotal_bytes{job=\"node-exporter\", instance=\"$instance\"}\n-\n 
node_memory_MemFree_bytes{job=\"node-exporter\", instance=\"$instance\"}\n-\n node_memory_Buffers_bytes{job=\"node-exporter\", instance=\"$instance\"}\n-\n node_memory_Cached_bytes{job=\"node-exporter\", instance=\"$instance\"}\n)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "memory used", + "refId": "A" + }, + { + "expr": "node_memory_Buffers_bytes{job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "memory buffers", + "refId": "B" + }, + { + "expr": "node_memory_Cached_bytes{job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "memory cached", + "refId": "C" + }, + { + "expr": "node_memory_MemFree_bytes{job=\"node-exporter\", instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "memory free", + "refId": "D" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "$datasource", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 5, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": 
"range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "100 -\n(\n avg(node_memory_MemAvailable_bytes{job=\"node-exporter\", instance=\"$instance\"})\n/\n avg(node_memory_MemTotal_bytes{job=\"node-exporter\", instance=\"$instance\"})\n* 100\n)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "80, 90", + "title": "Memory Usage", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 0, + "fillGradient": 0, + "gridPos": { + + }, + "id": 6, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "/ read| written/", + "yaxis": 1 + }, + { + "alias": "/ io time/", + "yaxis": 2 + } + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + 
{ + "expr": "rate(node_disk_read_bytes_total{job=\"node-exporter\", instance=\"$instance\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\"}[$__interval])", + "format": "time_series", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}device{{`}}`}} read", + "refId": "A" + }, + { + "expr": "rate(node_disk_written_bytes_total{job=\"node-exporter\", instance=\"$instance\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\"}[$__interval])", + "format": "time_series", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}device{{`}}`}} written", + "refId": "B" + }, + { + "expr": "rate(node_disk_io_time_seconds_total{job=\"node-exporter\", instance=\"$instance\", device=~\"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+\"}[$__interval])", + "format": "time_series", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}device{{`}}`}} io time", + "refId": "C" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk I/O", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 7, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + 
"pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "used", + "color": "#E0B400" + }, + { + "alias": "available", + "color": "#73BF69" + } + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\n max by (device) (\n node_filesystem_size_bytes{job=\"node-exporter\", instance=\"$instance\", fstype!=\"\"}\n -\n node_filesystem_avail_bytes{job=\"node-exporter\", instance=\"$instance\", fstype!=\"\"}\n )\n)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "used", + "refId": "A" + }, + { + "expr": "sum(\n max by (device) (\n node_filesystem_avail_bytes{job=\"node-exporter\", instance=\"$instance\", fstype!=\"\"}\n )\n)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "available", + "refId": "B" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk Space Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 0, + "fillGradient": 0, + "gridPos": { + + }, + "id": 8, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": 
false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(node_network_receive_bytes_total{job=\"node-exporter\", instance=\"$instance\", device!=\"lo\"}[$__interval])", + "format": "time_series", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}device{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Network Received", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 0, + "fillGradient": 0, + "gridPos": { + + }, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(node_network_transmit_bytes_total{job=\"node-exporter\", instance=\"$instance\", device!=\"lo\"}[$__interval])", + "format": "time_series", + 
"interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}device{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Network Transmitted", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + + ], + "templating": { + "list": [ + { + "current": { + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "instance", + "options": [ + + ], + "query": "label_values(node_exporter_build_info{job=\"node-exporter\"}, instance)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Nodes", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/persistentvolumesusage.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/persistentvolumesusage.yaml new file mode 100755 index 000000000..180087aa3 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/persistentvolumesusage.yaml @@ -0,0 +1,577 @@ +{{- /* +Generated from 'persistentvolumesusage' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "persistentvolumesusage" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + persistentvolumesusage.json: |- + { + "__inputs": [ + + ], + "__requires": [ + + ], + "annotations": { + "list": [ + + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "collapsed": false, + "panels": 
[ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 9, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(\n sum without(instance, node) (kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n -\n sum without(instance, node) (kubelet_volume_stats_available_bytes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Used Space", + "refId": "A" + }, + { + "expr": "sum without(instance, node) (kubelet_volume_stats_available_bytes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Free Space", + "refId": "B" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Volume Space Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "bytes", + "label": null, + 
"logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "$datasource", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 3, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "max without(instance,node) (\n(\n kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n -\n kubelet_volume_stats_available_bytes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n)\n/\nkubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n* 100)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "80, 90", + "title": "Volume Space Usage", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + } + ], + 
"repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 4, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 9, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum without(instance, node) (kubelet_volume_stats_inodes_used{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Used inodes", + "refId": "A" + }, + { + "expr": "(\n sum without(instance, node) (kubelet_volume_stats_inodes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n -\n sum without(instance, node) (kubelet_volume_stats_inodes_used{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " Free inodes", + "refId": "B" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Volume inodes Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": 
null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "$datasource", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 5, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "max without(instance,node) (\nkubelet_volume_stats_inodes_used{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n/\nkubelet_volume_stats_inodes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n* 100)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "80, 90", + "title": "Volume inodes Usage", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": 
"current" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kubelet_volume_stats_capacity_bytes, cluster)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Namespace", + "multi": false, + "name": "namespace", + "options": [ + + ], + "query": "label_values(kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\"}, namespace)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "PersistentVolumeClaim", + "multi": false, + "name": "volume", + "options": [ + + ], + "query": "label_values(kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\", metrics_path=\"/metrics\", namespace=\"$namespace\"}, persistentvolumeclaim)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + 
"type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-7d", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "UTC", + "title": "Kubernetes / Persistent Volumes", + "uid": "919b92a8e8041bd567af9edab12c840c", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/pod-total.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/pod-total.yaml new file mode 100755 index 000000000..1790df788 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/pod-total.yaml @@ -0,0 +1,1228 @@ +{{- /* +Generated from 'pod-total' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "pod-total" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + pod-total.json: |- + { + "__inputs": [ + + ], + "__requires": [ + + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ + + ], + "panels": [ + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [ + + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Current Bandwidth", + "titleSize": "h6", + "type": "row" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 0, + "format": "time_series", + "gauge": { + "maxValue": 
100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 1 + }, + "height": 9, + "id": 3, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "minSpan": 12, + "nullPointMode": "connected", + "nullText": null, + "options": { + "fieldOptions": { + "calcs": [ + "last" + ], + "defaults": { + "max": 10000000000, + "min": 0, + "title": "$namespace: $pod", + "unit": "Bps" + }, + "mappings": [ + + ], + "override": { + + }, + "thresholds": [ + { + "color": "dark-green", + "index": 0, + "value": null + }, + { + "color": "dark-yellow", + "index": 1, + "value": 5000000000 + }, + { + "color": "dark-red", + "index": 2, + "value": 7000000000 + } + ], + "values": false + } + }, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 12, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\", pod=~\"$pod\"}[$interval:$resolution]))", + "format": "time_series", + "instant": null, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Current Rate of Bytes Received", + "type": "gauge", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "decimals": 
0, + "format": "time_series", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 1 + }, + "height": 9, + "id": 4, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "minSpan": 12, + "nullPointMode": "connected", + "nullText": null, + "options": { + "fieldOptions": { + "calcs": [ + "last" + ], + "defaults": { + "max": 10000000000, + "min": 0, + "title": "$namespace: $pod", + "unit": "Bps" + }, + "mappings": [ + + ], + "override": { + + }, + "thresholds": [ + { + "color": "dark-green", + "index": 0, + "value": null + }, + { + "color": "dark-yellow", + "index": 1, + "value": 5000000000 + }, + { + "color": "dark-red", + "index": 2, + "value": 7000000000 + } + ], + "values": false + } + }, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 12, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\", pod=~\"$pod\"}[$interval:$resolution]))", + "format": "time_series", + "instant": null, + "intervalFactor": 1, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "timeFrom": null, + "timeShift": null, + "title": "Current Rate of Bytes Transmitted", + "type": "gauge", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 10 + }, + "id": 5, + "panels": [ + + 
], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Bandwidth", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 11 + }, + "id": 6, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\", pod=~\"$pod\"}[$interval:$resolution])) by (pod)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Receive Bandwidth", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + 
"y": 11 + }, + "id": 7, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\", pod=~\"$pod\"}[$interval:$resolution])) by (pod)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Transmit Bandwidth", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "collapse": true, + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 20 + }, + "id": 8, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 21 + }, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": 
null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_receive_packets_total{cluster=\"$cluster\",namespace=~\"$namespace\", pod=~\"$pod\"}[$interval:$resolution])) by (pod)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 21 + }, + "id": 10, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + 
"stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\",namespace=~\"$namespace\", pod=~\"$pod\"}[$interval:$resolution])) by (pod)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Packets", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": true, + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 21 + }, + "id": 11, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 32 + }, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + 
{ + "expr": "sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\",namespace=~\"$namespace\", pod=~\"$pod\"}[$interval:$resolution])) by (pod)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets Dropped", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 32 + }, + "id": 13, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\",namespace=~\"$namespace\", pod=~\"$pod\"}[$interval:$resolution])) by (pod)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + 
"timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets Dropped", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Errors", + "titleSize": "h6", + "type": "row" + } + ], + "refresh": "10s", + "rows": [ + + ], + "schemaVersion": 18, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kube_pod_info, cluster)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "kube-system", + "value": "kube-system" + }, + "datasource": "$datasource", + "definition": "label_values(container_network_receive_packets_total{cluster=\"$cluster\"}, namespace)", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "namespace", + "options": [ + + ], + "query": 
"label_values(container_network_receive_packets_total{cluster=\"$cluster\"}, namespace)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "label_values(container_network_receive_packets_total{cluster=\"$cluster\",namespace=~\"$namespace\"}, pod)", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "pod", + "options": [ + + ], + "query": "label_values(container_network_receive_packets_total{cluster=\"$cluster\",namespace=~\"$namespace\"}, pod)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "5m", + "value": "5m" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "resolution", + "options": [ + { + "selected": false, + "text": "30s", + "value": "30s" + }, + { + "selected": true, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + } + ], + "query": "30s,5m,1h", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "interval", + "useTags": false + }, + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "5m", + "value": "5m" + }, + "datasource": "$datasource", + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": "interval", + "options": [ + { + "selected": true, + "text": "4h", + "value": "4h" + } + ], + "query": "4h", + "refresh": 2, + 
"regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "interval", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "UTC", + "title": "Kubernetes / Networking / Pod", + "uid": "7a18067ce943a40ae25454675c19ff5c", + "version": 0 + } +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/prometheus-remote-write.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/prometheus-remote-write.yaml new file mode 100755 index 000000000..89c6c4be1 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/prometheus-remote-write.yaml @@ -0,0 +1,1670 @@ +{{- /* +Generated from 'prometheus-remote-write' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.prometheus.prometheusSpec.remoteWriteDashboards }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "prometheus-remote-write" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + prometheus-remote-write.json: |- + { + "__inputs": [ + + ], + "__requires": [ + + ], + "annotations": { + "list": [ + + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ + + ], + "refresh": "60s", + "rows": [ + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 2, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + 
"seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "(\n prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~\"$cluster\", instance=~\"$instance\"} \n- \n ignoring(remote_name, url) group_right(instance) (prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~\"$cluster\", instance=~\"$instance\"} != 0)\n)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}cluster{{`}}`}}:{{`{{`}}instance{{`}}`}} {{`{{`}}remote_name{{`}}`}}:{{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Highest Timestamp In vs. Highest Timestamp Sent", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 3, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "clamp_min(\n rate(prometheus_remote_storage_highest_timestamp_in_seconds{cluster=~\"$cluster\", 
instance=~\"$instance\"}[5m]) \n- \n ignoring (remote_name, url) group_right(instance) rate(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{cluster=~\"$cluster\", instance=~\"$instance\"}[5m])\n, 0)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}cluster{{`}}`}}:{{`{{`}}instance{{`}}`}} {{`{{`}}remote_name{{`}}`}}:{{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate[5m]", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Timestamps", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 4, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(\n prometheus_remote_storage_samples_in_total{cluster=~\"$cluster\", instance=~\"$instance\"}[5m])\n- \n 
ignoring(remote_name, url) group_right(instance) (rate(prometheus_remote_storage_succeeded_samples_total{cluster=~\"$cluster\", instance=~\"$instance\"}[5m]) or rate(prometheus_remote_storage_samples_total{cluster=~\"$cluster\", instance=~\"$instance\"}[5m]))\n- \n (rate(prometheus_remote_storage_dropped_samples_total{cluster=~\"$cluster\", instance=~\"$instance\"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~\"$cluster\", instance=~\"$instance\"}[5m]))\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}cluster{{`}}`}}:{{`{{`}}instance{{`}}`}} {{`{{`}}remote_name{{`}}`}}:{{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate, in vs. succeeded or dropped [5m]", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Samples", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 5, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "minSpan": 6, + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + 
"points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_remote_storage_shards{cluster=~\"$cluster\", instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}cluster{{`}}`}}:{{`{{`}}instance{{`}}`}} {{`{{`}}remote_name{{`}}`}}:{{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Shards", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 6, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_remote_storage_shards_max{cluster=~\"$cluster\", instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}cluster{{`}}`}}:{{`{{`}}instance{{`}}`}} {{`{{`}}remote_name{{`}}`}}:{{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + 
"thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Max Shards", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 7, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_remote_storage_shards_min{cluster=~\"$cluster\", instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}cluster{{`}}`}}:{{`{{`}}instance{{`}}`}} {{`{{`}}remote_name{{`}}`}}:{{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Min Shards", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + 
"max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 8, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_remote_storage_shards_desired{cluster=~\"$cluster\", instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}cluster{{`}}`}}:{{`{{`}}instance{{`}}`}} {{`{{`}}remote_name{{`}}`}}:{{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Desired Shards", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Shards", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 9, + 
"legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_remote_storage_shard_capacity{cluster=~\"$cluster\", instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}cluster{{`}}`}}:{{`{{`}}instance{{`}}`}} {{`{{`}}remote_name{{`}}`}}:{{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Shard Capacity", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 10, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": 
false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_remote_storage_pending_samples{cluster=~\"$cluster\", instance=~\"$instance\"} or prometheus_remote_storage_samples_pending{cluster=~\"$cluster\", instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}cluster{{`}}`}}:{{`{{`}}instance{{`}}`}} {{`{{`}}remote_name{{`}}`}}:{{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Pending Samples", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Shard Details", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 11, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_tsdb_wal_segment_current{cluster=~\"$cluster\", instance=~\"$instance\"}", 
+ "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}cluster{{`}}`}}:{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "TSDB Current Segment", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_wal_watcher_current_segment{cluster=~\"$cluster\", instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}cluster{{`}}`}}:{{`{{`}}instance{{`}}`}} {{`{{`}}consumer{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Remote Write Current Segment", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + 
"format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Segments", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 13, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_remote_storage_dropped_samples_total{cluster=~\"$cluster\", instance=~\"$instance\"}[5m]) or rate(prometheus_remote_storage_samples_dropped_total{cluster=~\"$cluster\", instance=~\"$instance\"}[5m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}cluster{{`}}`}}:{{`{{`}}instance{{`}}`}} {{`{{`}}remote_name{{`}}`}}:{{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Dropped Samples", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": 
"short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 14, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_remote_storage_failed_samples_total{cluster=~\"$cluster\", instance=~\"$instance\"}[5m]) or rate(prometheus_remote_storage_samples_failed_total{cluster=~\"$cluster\", instance=~\"$instance\"}[5m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}cluster{{`}}`}}:{{`{{`}}instance{{`}}`}} {{`{{`}}remote_name{{`}}`}}:{{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Failed Samples", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 15, + "legend": { + "alignAsTable": false, + "avg": false, + 
"current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_remote_storage_retried_samples_total{cluster=~\"$cluster\", instance=~\"$instance\"}[5m]) or rate(prometheus_remote_storage_samples_retried_total{cluster=~\"$cluster\", instance=~\"$instance\"}[5m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}cluster{{`}}`}}:{{`{{`}}instance{{`}}`}} {{`{{`}}remote_name{{`}}`}}:{{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Retried Samples", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 16, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + 
"seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_remote_storage_enqueue_retries_total{cluster=~\"$cluster\", instance=~\"$instance\"}[5m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}cluster{{`}}`}}:{{`{{`}}instance{{`}}`}} {{`{{`}}remote_name{{`}}`}}:{{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Enqueue Retries", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Misc. 
Rates", + "titleSize": "h6", + "type": "row" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "prometheus-mixin" + ], + "templating": { + "list": [ + { + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "value": { + "selected": true, + "text": "All", + "value": "$__all" + } + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "instance", + "options": [ + + ], + "query": "label_values(prometheus_build_info, instance)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "value": { + "selected": true, + "text": "All", + "value": "$__all" + } + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": true, + "label": null, + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kube_pod_container_info{image=~\".*prometheus.*\"}, cluster)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "url", + "options": [ + + ], + "query": "label_values(prometheus_remote_storage_shards{cluster=~\"$cluster\", instance=~\"$instance\"}, url)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + 
"from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Prometheus / Remote Write", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/prometheus.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/prometheus.yaml new file mode 100755 index 000000000..f3292faf2 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/prometheus.yaml @@ -0,0 +1,1227 @@ +{{- /* +Generated from 'prometheus' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "prometheus" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 
}} +data: + prometheus.json: |- + { + "annotations": { + "list": [ + + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + + ], + "refresh": "60s", + "rows": [ + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Count", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "hidden", + "unit": "short" + }, + { + "alias": "Uptime", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "Instance", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "instance", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "Job", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + 
"decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "job", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "Version", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTargetBlank": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "version", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "count by (job, instance, version) (prometheus_build_info{job=~\"$job\", instance=~\"$instance\"})", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "max by (job, instance) (time() - process_start_time_seconds{job=~\"$job\", instance=~\"$instance\"})", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Prometheus Stats", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Prometheus Stats", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { 
+ + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(prometheus_target_sync_length_seconds_sum{job=~\"$job\",instance=~\"$instance\"}[5m])) by (scrape_job) * 1e3", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}scrape_job{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Target Sync", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": 
"sum(prometheus_sd_discovered_targets{job=~\"$job\",instance=~\"$instance\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Targets", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Targets", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Discovery", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_target_interval_length_seconds_sum{job=~\"$job\",instance=~\"$instance\"}[5m]) / rate(prometheus_target_interval_length_seconds_count{job=~\"$job\",instance=~\"$instance\"}[5m]) * 1e3", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}interval{{`}}`}} configured", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Average Scrape Interval Duration", + 
"tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (job) (rate(prometheus_target_scrapes_exceeded_sample_limit_total[1m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "exceeded sample limit: {{`{{`}}job{{`}}`}}", + "legendLink": null, + "step": 10 + }, + { + "expr": "sum by (job) (rate(prometheus_target_scrapes_sample_duplicate_timestamp_total[1m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "duplicate timestamp: {{`{{`}}job{{`}}`}}", + "legendLink": null, + "step": 10 + }, + { + "expr": "sum by (job) (rate(prometheus_target_scrapes_sample_out_of_bounds_total[1m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "out of bounds: {{`{{`}}job{{`}}`}}", + "legendLink": null, + "step": 10 + }, + { + "expr": "sum by (job) (rate(prometheus_target_scrapes_sample_out_of_order_total[1m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "out of order: {{`{{`}}job{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + 
"thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Scrape failures", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_tsdb_head_samples_appended_total{job=~\"$job\",instance=~\"$instance\"}[5m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}job{{`}}`}} {{`{{`}}instance{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Appended Samples", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + 
"showTitle": true, + "title": "Retrieval", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_tsdb_head_series{job=~\"$job\",instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}job{{`}}`}} {{`{{`}}instance{{`}}`}} head series", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Head Series", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + 
"span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "prometheus_tsdb_head_chunks{job=~\"$job\",instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}job{{`}}`}} {{`{{`}}instance{{`}}`}} head chunks", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Head Chunks", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Storage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "rate(prometheus_engine_query_duration_seconds_count{job=~\"$job\",instance=~\"$instance\",slice=\"inner_eval\"}[5m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}job{{`}}`}} {{`{{`}}instance{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + 
"title": "Query Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "max by (slice) (prometheus_engine_query_duration_seconds{quantile=\"0.9\",job=~\"$job\",instance=~\"$instance\"}) * 1e3", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}slice{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Stage Duration", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Query", + "titleSize": "h6" + } + ], + 
"schemaVersion": 14, + "style": "dark", + "tags": [ + "prometheus-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "job", + "multi": true, + "name": "job", + "options": [ + + ], + "query": "label_values(prometheus_build_info, job)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "instance", + "multi": true, + "name": "instance", + "options": [ + + ], + "query": "label_values(prometheus_build_info, instance)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "utc", + "title": "Prometheus / Overview", + "uid": "", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/proxy.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/proxy.yaml new file mode 100755 index 000000000..a7cecd5dd --- /dev/null +++ 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/proxy.yaml @@ -0,0 +1,1232 @@ +{{- /* +Generated from 'proxy' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if (include "exporter.kubeProxy.enabled" .)}} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "proxy" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + proxy.json: |- + { + "__inputs": [ + + ], + "__requires": [ + + ], + "annotations": { + "list": [ + + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + 
"thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 2, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(up{job=\"{{ include "exporter.kubeProxy.jobName" . }}\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Up", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "min" + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 3, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 5, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(kubeproxy_sync_proxy_rules_duration_seconds_count{job=\"{{ include "exporter.kubeProxy.jobName" . 
}}\", instance=~\"$instance\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "rate", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rules Sync Rate", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 4, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 5, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99,rate(kubeproxy_sync_proxy_rules_duration_seconds_bucket{job=\"{{ include "exporter.kubeProxy.jobName" . 
}}\", instance=~\"$instance\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rule Sync Latency 99th Quantile", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 5, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(kubeproxy_network_programming_duration_seconds_count{job=\"{{ include "exporter.kubeProxy.jobName" . 
}}\", instance=~\"$instance\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "rate", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Network Programming Rate", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 6, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(kubeproxy_network_programming_duration_seconds_bucket{job=\"{{ include "exporter.kubeProxy.jobName" . 
}}\", instance=~\"$instance\"}[5m])) by (instance, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Network Programming Latency 99th Quantile", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 7, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeProxy.jobName" . }}\", instance=~\"$instance\",code=~\"2..\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "2xx", + "refId": "A" + }, + { + "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeProxy.jobName" . 
}}\", instance=~\"$instance\",code=~\"3..\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "3xx", + "refId": "B" + }, + { + "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeProxy.jobName" . }}\", instance=~\"$instance\",code=~\"4..\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "4xx", + "refId": "C" + }, + { + "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeProxy.jobName" . }}\", instance=~\"$instance\",code=~\"5..\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "5xx", + "refId": "D" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Kube API Request Rate", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 8, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 8, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{job=\"{{ include 
"exporter.kubeProxy.jobName" . }}\",instance=~\"$instance\",verb=\"POST\"}[5m])) by (verb, url, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}verb{{`}}`}} {{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Post Request Latency 99th Quantile", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 9, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{job=\"{{ include "exporter.kubeProxy.jobName" . 
}}\", instance=~\"$instance\", verb=\"GET\"}[5m])) by (verb, url, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}verb{{`}}`}} {{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Get Request Latency 99th Quantile", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 10, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "process_resident_memory_bytes{job=\"{{ include "exporter.kubeProxy.jobName" . 
}}\",instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 11, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(process_cpu_seconds_total{job=\"{{ include "exporter.kubeProxy.jobName" . 
}}\",instance=~\"$instance\"}[5m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_goroutines{job=\"{{ include "exporter.kubeProxy.jobName" . 
}}\",instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Goroutines", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "instance", + "options": [ + + ], + "query": "label_values(kubeproxy_network_programming_duration_seconds_bucket{job=\"{{ include "exporter.kubeProxy.jobName" . 
}}\"}, instance)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "UTC", + "title": "Kubernetes / Proxy", + "uid": "632e265de029684c40b21cb76bca4f94", + "version": 0 + } +{{- end }}{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/scheduler.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/scheduler.yaml new file mode 100755 index 000000000..eba5d160c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/scheduler.yaml @@ -0,0 +1,1076 @@ +{{- /* +Generated from 'scheduler' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if (include "exporter.kubeScheduler.enabled" .)}} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "scheduler" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + scheduler.json: |- + { + "__inputs": [ + + ], + "__requires": [ + + ], + "annotations": { + "list": [ + + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 2, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + 
"nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 2, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(up{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Up", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "min" + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 3, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 5, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(scheduler_e2e_scheduling_duration_seconds_count{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\"}[5m])) by (instance)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} e2e", + "refId": "A" + }, + { + "expr": "sum(rate(scheduler_binding_duration_seconds_count{job=\"{{ include "exporter.kubeScheduler.jobName" . 
}}\", instance=~\"$instance\"}[5m])) by (instance)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} binding", + "refId": "B" + }, + { + "expr": "sum(rate(scheduler_scheduling_algorithm_duration_seconds_count{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\"}[5m])) by (instance)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} scheduling algorithm", + "refId": "C" + }, + { + "expr": "sum(rate(scheduler_volume_scheduling_duration_seconds_count{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\"}[5m])) by (instance)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} volume", + "refId": "D" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Scheduling Rate", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 4, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 5, + "stack": false, + 
"steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\",instance=~\"$instance\"}[5m])) by (instance, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} e2e", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(scheduler_binding_duration_seconds_bucket{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\",instance=~\"$instance\"}[5m])) by (instance, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} binding", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\",instance=~\"$instance\"}[5m])) by (instance, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} scheduling algorithm", + "refId": "C" + }, + { + "expr": "histogram_quantile(0.99, sum(rate(scheduler_volume_scheduling_duration_seconds_bucket{job=\"{{ include "exporter.kubeScheduler.jobName" . 
}}\",instance=~\"$instance\"}[5m])) by (instance, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} volume", + "refId": "D" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Scheduling latency 99th Quantile", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 5, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeScheduler.jobName" . 
}}\", instance=~\"$instance\",code=~\"2..\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "2xx", + "refId": "A" + }, + { + "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\",code=~\"3..\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "3xx", + "refId": "B" + }, + { + "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\",code=~\"4..\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "4xx", + "refId": "C" + }, + { + "expr": "sum(rate(rest_client_requests_total{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\",code=~\"5..\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "5xx", + "refId": "D" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Kube API Request Rate", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 6, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + 
"repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 8, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(rest_client_request_duration_seconds_bucket{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\", verb=\"POST\"}[5m])) by (verb, url, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}verb{{`}}`}} {{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Post Request Latency 99th Quantile", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 7, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.99, 
sum(rate(rest_client_request_duration_seconds_bucket{job=\"{{ include "exporter.kubeScheduler.jobName" . }}\", instance=~\"$instance\", verb=\"GET\"}[5m])) by (verb, url, le))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}verb{{`}}`}} {{`{{`}}url{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Get Request Latency 99th Quantile", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 8, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "process_resident_memory_bytes{job=\"{{ include "exporter.kubeScheduler.jobName" . 
}}\", instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(process_cpu_seconds_total{job=\"{{ include "exporter.kubeScheduler.jobName" . 
}}\", instance=~\"$instance\"}[5m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 10, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "go_goroutines{job=\"{{ include "exporter.kubeScheduler.jobName" . 
}}\",instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Goroutines", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "instance", + "options": [ + + ], + "query": "label_values(process_cpu_seconds_total{job=\"{{ include "exporter.kubeScheduler.jobName" . 
}}\"}, instance)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "UTC", + "title": "Kubernetes / Scheduler", + "uid": "2e6b6a3b4bddf1427b3a55aa1311c656", + "version": 0 + } +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/statefulset.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/statefulset.yaml new file mode 100755 index 000000000..3512fada2 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/statefulset.yaml @@ -0,0 +1,928 @@ +{{- /* +Generated from 'statefulset' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "statefulset" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + statefulset.json: |- + { + "__inputs": [ + + ], + "__requires": [ + + ], + "annotations": { + "list": [ + + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ + + ], + "refresh": "", + "rows": [ + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 2, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "cores", + 
"postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(rate(container_cpu_usage_seconds_total{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", cluster=\"$cluster\", container!=\"\", namespace=\"$namespace\", pod=~\"$statefulset.*\"}[3m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "CPU", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 3, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "GB", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(container_memory_usage_bytes{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", cluster=\"$cluster\", container!=\"\", namespace=\"$namespace\", pod=~\"$statefulset.*\"}) 
/ 1024^3", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Memory", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 4, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "Bps", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(rate(container_network_transmit_bytes_total{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", cluster=\"$cluster\", namespace=\"$namespace\", pod=~\"$statefulset.*\"}[3m])) + sum(rate(container_network_receive_bytes_total{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", cluster=\"$cluster\", namespace=\"$namespace\",pod=~\"$statefulset.*\"}[3m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Network", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": 
"current" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "height": "100px", + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 5, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "max(kube_statefulset_replicas{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", statefulset=\"$statefulset\"}) without (instance, pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Desired Replicas", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + 
"minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 6, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "min(kube_statefulset_status_replicas_current{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", statefulset=\"$statefulset\"}) without (instance, pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Replicas of current version", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 7, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + 
"rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "max(kube_statefulset_status_observed_generation{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", statefulset=\"$statefulset\"}) without (instance, pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Observed Generation", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 8, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "max(kube_statefulset_metadata_generation{job=\"kube-state-metrics\", statefulset=\"$statefulset\", cluster=\"$cluster\", namespace=\"$namespace\"}) without (instance, pod)", + "format": "time_series", + 
"intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Metadata Generation", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "fillGradient": 0, + "gridPos": { + + }, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max(kube_statefulset_replicas{job=\"kube-state-metrics\", statefulset=\"$statefulset\", cluster=\"$cluster\", namespace=\"$namespace\"}) without (instance, pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "replicas specified", + "refId": "A" + }, + { + "expr": "max(kube_statefulset_status_replicas{job=\"kube-state-metrics\", statefulset=\"$statefulset\", cluster=\"$cluster\", namespace=\"$namespace\"}) without (instance, pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "replicas created", + "refId": "B" + }, + { + "expr": "min(kube_statefulset_status_replicas_ready{job=\"kube-state-metrics\", statefulset=\"$statefulset\", cluster=\"$cluster\", namespace=\"$namespace\"}) without (instance, pod)", + 
"format": "time_series", + "intervalFactor": 2, + "legendFormat": "ready", + "refId": "C" + }, + { + "expr": "min(kube_statefulset_status_replicas_current{job=\"kube-state-metrics\", statefulset=\"$statefulset\", cluster=\"$cluster\", namespace=\"$namespace\"}) without (instance, pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "replicas of current version", + "refId": "D" + }, + { + "expr": "min(kube_statefulset_status_replicas_updated{job=\"kube-state-metrics\", statefulset=\"$statefulset\", cluster=\"$cluster\", namespace=\"$namespace\"}) without (instance, pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "updated", + "refId": "E" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Replicas", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + 
"options": [ + + ], + "query": "label_values(kube_statefulset_metadata_generation, cluster)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Namespace", + "multi": false, + "name": "namespace", + "options": [ + + ], + "query": "label_values(kube_statefulset_metadata_generation{job=\"kube-state-metrics\", cluster=\"$cluster\"}, namespace)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Name", + "multi": false, + "name": "statefulset", + "options": [ + + ], + "query": "label_values(kube_statefulset_metadata_generation{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\"}, statefulset)", + "refresh": 2, + "regex": "", + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "UTC", + "title": "Kubernetes / StatefulSets", + "uid": "a31c1f46e6f727cb37c0d731a7245005", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/workload-total.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/workload-total.yaml new file mode 100755 index 000000000..cd4e2364d --- /dev/null +++ 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards-1.14/workload-total.yaml @@ -0,0 +1,1438 @@ +{{- /* +Generated from 'workload-total' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "workload-total" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + workload-total.json: |- + { + "__inputs": [ + + ], + "__requires": [ + + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ + + ], + "panels": [ + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 2, + "panels": [ + + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": 
true, + "title": "Current Bandwidth", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": { + + }, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 3, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}} pod {{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Rate of Bytes Received", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "series", + "name": null, + "show": false, + "values": [ + "current" + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + 
"aliasColors": { + + }, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 4, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}} pod {{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Rate of Bytes Transmitted", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "series", + "name": null, + "show": false, + "values": [ + "current" + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "collapse": true, + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 10 
+ }, + "id": 5, + "panels": [ + { + "aliasColors": { + + }, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 11 + }, + "id": 6, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(avg(irate(container_network_receive_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}} pod {{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Average Rate of Bytes Received", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "series", + "name": null, + "show": false, + "values": [ + "current" + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": true, + "dashLength": 10, + 
"dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 11 + }, + "id": 7, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sideWidth": null, + "sort": "current", + "sortDesc": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [ + + ], + "minSpan": 24, + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 24, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(avg(irate(container_network_transmit_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}} pod {{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Average Rate of Bytes Transmitted", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "series", + "name": null, + "show": false, + "values": [ + "current" + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Average Bandwidth", + "titleSize": "h6", + "type": "row" 
+ }, + { + "collapse": false, + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 11 + }, + "id": 8, + "panels": [ + + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Bandwidth HIstory", + "titleSize": "h6", + "type": "row" + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 12 + }, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_receive_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Receive Bandwidth", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": 
null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 12 + }, + "id": 10, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_transmit_bytes_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Transmit Bandwidth", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + 
"collapse": true, + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 21 + }, + "id": 11, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 22 + }, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_receive_packets_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "aliasColors": { 
+ + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 22 + }, + "id": 13, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_transmit_packets_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Packets", + "titleSize": "h6", + "type": "row" + }, + { + 
"collapse": true, + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 22 + }, + "id": 14, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 23 + }, + "id": 15, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_receive_packets_dropped_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Received Packets Dropped", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + 
"aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 2, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 23 + }, + "id": 16, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "sideWidth": null, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [ + + ], + "minSpan": 12, + "nullPointMode": "connected", + "paceLength": 10, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum(irate(container_network_transmit_packets_dropped_total{cluster=\"$cluster\",namespace=~\"$namespace\"}[$interval:$resolution])\n* on (namespace,pod)\ngroup_left(workload,workload_type) namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\"$workload\", workload_type=\"$type\"}) by (pod))\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "refId": "A", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Rate of Transmitted Packets Dropped", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "pps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Errors", + "titleSize": "h6", 
+ "type": "row" + } + ], + "refresh": "10s", + "rows": [ + + ], + "schemaVersion": 18, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kube_pod_info, cluster)", + "refresh": 1, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": ".+", + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "kube-system", + "value": "kube-system" + }, + "datasource": "$datasource", + "definition": "label_values(container_network_receive_packets_total{cluster=\"$cluster\"}, namespace)", + "hide": 0, + "includeAll": true, + "label": null, + "multi": false, + "name": "namespace", + "options": [ + + ], + "query": "label_values(container_network_receive_packets_total{cluster=\"$cluster\"}, namespace)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "", + "value": "" + }, + "datasource": "$datasource", + "definition": "label_values(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\"}, workload)", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "workload", + "options": [ + + ], + "query": 
"label_values(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\"}, workload)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "deployment", + "value": "deployment" + }, + "datasource": "$datasource", + "definition": "label_values(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\"$workload\"}, workload_type)", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "type", + "options": [ + + ], + "query": "label_values(namespace_workload_pod:kube_pod_owner:relabel{cluster=\"$cluster\",namespace=~\"$namespace\", workload=~\"$workload\"}, workload_type)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "5m", + "value": "5m" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "resolution", + "options": [ + { + "selected": false, + "text": "30s", + "value": "30s" + }, + { + "selected": true, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + } + ], + "query": "30s,5m,1h", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "interval", + "useTags": false + }, + { + "allValue": null, + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "5m", + "value": "5m" + }, + "datasource": "$datasource", + "hide": 2, + "includeAll": false, + "label": null, + "multi": false, + "name": 
"interval", + "options": [ + { + "selected": true, + "text": "4h", + "value": "4h" + } + ], + "query": "4h", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "interval", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "UTC", + "title": "Kubernetes / Networking / Workload", + "uid": "728bf77cc1166d2f3133bf25846876cc", + "version": 0 + } +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/etcd.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/etcd.yaml new file mode 100755 index 000000000..ac54228e9 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/etcd.yaml @@ -0,0 +1,1118 @@ +{{- /* +Generated from 'etcd' from https://raw.githubusercontent.com/etcd-io/website/master/content/docs/current/op-guide/grafana.json +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +{{- if (include "exporter.kubeEtcd.enabled" .)}} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "etcd" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + etcd.json: |- + { + "annotations": { + "list": [] + }, + "description": "etcd sample Grafana dashboard with Prometheus", + "editable": true, + "gnetId": null, + "hideControls": false, + "links": [], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "editable": true, + "height": "250px", + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "$datasource", + "editable": true, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "id": 28, + "interval": null, + "isNew": true, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, 
+ "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "targets": [ + { + "expr": "sum(etcd_server_has_leader{job=\"$cluster\"})", + "intervalFactor": 2, + "legendFormat": "", + "metric": "etcd_server_has_leader", + "refId": "A", + "step": 20 + } + ], + "thresholds": "", + "title": "Up", + "type": "singlestat", + "valueFontSize": "200%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "avg" + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fill": 0, + "id": 23, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 5, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(grpc_server_started_total{job=\"$cluster\",grpc_type=\"unary\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "RPC Rate", + "metric": "grpc_server_started_total", + "refId": "A", + "step": 2 + }, + { + "expr": "sum(rate(grpc_server_handled_total{job=\"$cluster\",grpc_type=\"unary\",grpc_code!=\"OK\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "RPC Failed Rate", + "metric": "grpc_server_handled_total", + "refId": "B", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "RPC Rate", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + 
"value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fill": 0, + "id": 41, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 4, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(grpc_server_started_total{job=\"$cluster\",grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{job=\"$cluster\",grpc_service=\"etcdserverpb.Watch\",grpc_type=\"bidi_stream\"})", + "intervalFactor": 2, + "legendFormat": "Watch Streams", + "metric": "grpc_server_handled_total", + "refId": "A", + "step": 4 + }, + { + "expr": "sum(grpc_server_started_total{job=\"$cluster\",grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"}) - sum(grpc_server_handled_total{job=\"$cluster\",grpc_service=\"etcdserverpb.Lease\",grpc_type=\"bidi_stream\"})", + "intervalFactor": 2, + "legendFormat": "Lease Streams", + "metric": "grpc_server_handled_total", + "refId": "B", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Active Streams", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": 
"short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "showTitle": false, + "title": "Row" + }, + { + "collapse": false, + "editable": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "decimals": null, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "etcd_mvcc_db_total_size_in_bytes{job=\"$cluster\"}", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} DB Size", + "metric": "", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "DB Size", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": 
"connected", + "percentage": false, + "pointradius": 1, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 4, + "stack": false, + "steppedLine": true, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_wal_fsync_duration_seconds_bucket{job=\"$cluster\"}[5m])) by (instance, le))", + "hide": false, + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} WAL fsync", + "metric": "etcd_disk_wal_fsync_duration_seconds_bucket", + "refId": "A", + "step": 4 + }, + { + "expr": "histogram_quantile(0.99, sum(rate(etcd_disk_backend_commit_duration_seconds_bucket{job=\"$cluster\"}[5m])) by (instance, le))", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} DB fsync", + "metric": "etcd_disk_backend_commit_duration_seconds_bucket", + "refId": "B", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Disk Sync Duration", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fill": 0, + "id": 29, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "process_resident_memory_bytes{job=\"$cluster\"}", + "intervalFactor": 2, + "legendFormat": 
"{{`{{`}}instance{{`}}`}} Resident Memory", + "metric": "process_resident_memory_bytes", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Memory", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "title": "New row" + }, + { + "collapse": false, + "editable": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fill": 5, + "id": 22, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 3, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "rate(etcd_network_client_grpc_received_bytes_total{job=\"$cluster\"}[5m])", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} Client Traffic In", + "metric": "etcd_network_client_grpc_received_bytes_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Client Traffic In", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": 
true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fill": 5, + "id": 21, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 3, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "rate(etcd_network_client_grpc_sent_bytes_total{job=\"$cluster\"}[5m])", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} Client Traffic Out", + "metric": "etcd_network_client_grpc_sent_bytes_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Client Traffic Out", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fill": 0, + "id": 20, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 3, + "stack": false, 
+ "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(etcd_network_peer_received_bytes_total{job=\"$cluster\"}[5m])) by (instance)", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} Peer Traffic In", + "metric": "etcd_network_peer_received_bytes_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Peer Traffic In", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "decimals": null, + "editable": true, + "error": false, + "fill": 0, + "grid": {}, + "id": 16, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 3, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(etcd_network_peer_sent_bytes_total{job=\"$cluster\"}[5m])) by (instance)", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} Peer Traffic Out", + "metric": "etcd_network_peer_sent_bytes_total", + "refId": "A", + "step": 4 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Peer Traffic Out", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + 
"show": true, + "values": [] + }, + "yaxes": [ + { + "format": "Bps", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "title": "New row" + }, + { + "collapse": false, + "editable": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fill": 0, + "id": 40, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(etcd_server_proposals_failed_total{job=\"$cluster\"}[5m]))", + "intervalFactor": 2, + "legendFormat": "Proposal Failure Rate", + "metric": "etcd_server_proposals_failed_total", + "refId": "A", + "step": 2 + }, + { + "expr": "sum(etcd_server_proposals_pending{job=\"$cluster\"})", + "intervalFactor": 2, + "legendFormat": "Proposal Pending Total", + "metric": "etcd_server_proposals_pending", + "refId": "B", + "step": 2 + }, + { + "expr": "sum(rate(etcd_server_proposals_committed_total{job=\"$cluster\"}[5m]))", + "intervalFactor": 2, + "legendFormat": "Proposal Commit Rate", + "metric": "etcd_server_proposals_committed_total", + "refId": "C", + "step": 2 + }, + { + "expr": "sum(rate(etcd_server_proposals_applied_total{job=\"$cluster\"}[5m]))", + "intervalFactor": 2, + "legendFormat": "Proposal Apply Rate", + "refId": "D", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Raft Proposals", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": 
{ + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "$datasource", + "decimals": 0, + "editable": true, + "error": false, + "fill": 0, + "id": 19, + "isNew": true, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "changes(etcd_server_leader_changes_seen_total{job=\"$cluster\"}[1d])", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}instance{{`}}`}} Total Leader Elections Per Day", + "metric": "etcd_server_leader_changes_seen_total", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeFrom": null, + "timeShift": null, + "title": "Total Leader Elections Per Day", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "title": "New row" + } + ], + "schemaVersion": 13, + "sharedCrosshair": false, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "label": 
null, + "name": "datasource", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [], + "query": "label_values(etcd_server_has_leader, job)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "now": true, + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "etcd", + "uid": "c2f4e12cdf69feb95caa41a5a1b423d9", + "version": 215 + } +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-cluster-rsrc-use.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-cluster-rsrc-use.yaml new file mode 100755 index 000000000..2b8eac4df --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-cluster-rsrc-use.yaml @@ -0,0 +1,959 @@ +{{- /* +Generated from 'k8s-cluster-rsrc-use' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-cluster-rsrc-use" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + k8s-cluster-rsrc-use.json: |- + { + "annotations": { + "list": [ + + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "node:cluster_cpu_utilisation:ratio{cluster=\"$cluster\"}", + "format": "time_series", + "intervalFactor": 2, + 
"legendFormat": "{{`{{`}}node{{`}}`}}", + "legendLink": "./d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilisation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "node:node_cpu_saturation_load1:{cluster=\"$cluster\"} / scalar(sum(min(kube_pod_info{cluster=\"$cluster\"}) by (node)))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}node{{`}}`}}", + "legendLink": "./d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Saturation (Load1)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": 
true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "node:cluster_memory_utilisation:ratio{cluster=\"$cluster\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}node{{`}}`}}", + "legendLink": "./d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Utilisation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": 
true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "node:node_memory_swap_io_bytes:sum_rate{cluster=\"$cluster\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}node{{`}}`}}", + "legendLink": "./d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Saturation (Swap I/O)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "node:node_disk_utilisation:avg_irate{cluster=\"$cluster\"} / 
scalar(:kube_pod_info_node_count:{cluster=\"$cluster\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}node{{`}}`}}", + "legendLink": "./d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk IO Utilisation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "node:node_disk_saturation:avg_irate{cluster=\"$cluster\"} / scalar(:kube_pod_info_node_count:{cluster=\"$cluster\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}node{{`}}`}}", + "legendLink": "./d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk IO Saturation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + 
"yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Disk", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "node:node_net_utilisation:sum_irate{cluster=\"$cluster\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}node{{`}}`}}", + "legendLink": "./d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Net Utilisation (Transmitted)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 8, + "legend": { + "avg": false, + 
"current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "node:node_net_saturation:sum_irate{cluster=\"$cluster\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}node{{`}}`}}", + "legendLink": "./d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Net Saturation (Dropped)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Network", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": 
"sum(max(node_filesystem_size_bytes{fstype=~\"ext[234]|btrfs|xfs|zfs\", cluster=\"$cluster\"} - node_filesystem_avail_bytes{fstype=~\"ext[234]|btrfs|xfs|zfs\", cluster=\"$cluster\"}) by (device,pod,namespace)) by (pod,namespace)\n/ scalar(sum(max(node_filesystem_size_bytes{fstype=~\"ext[234]|btrfs|xfs|zfs\", cluster=\"$cluster\"}) by (device,pod,namespace)))\n* on (namespace, pod) group_left (node) node_namespace_pod:kube_pod_info:{cluster=\"$cluster\"}\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}node{{`}}`}}", + "legendLink": "./d/4ac4f123aae0ff6dbaf4f4f66120033b/k8s-node-rsrc-use", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk Capacity", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Storage", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": 
"label_values(:kube_pod_info_node_count:, cluster)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Kubernetes / USE Method / Cluster", + "uid": "a6e7d1362e1ddbb79db21d5bb40d7137", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-node-rsrc-use.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-node-rsrc-use.yaml new file mode 100755 index 000000000..101252086 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-node-rsrc-use.yaml @@ -0,0 +1,986 @@ +{{- /* +Generated from 'k8s-node-rsrc-use' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-node-rsrc-use" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + k8s-node-rsrc-use.json: |- + { + "annotations": { + "list": [ + + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "node:node_cpu_utilisation:avg1m{cluster=\"$cluster\", node=\"$node\"}", + "format": "time_series", + "intervalFactor": 
2, + "legendFormat": "Utilisation", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilisation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "node:node_cpu_saturation_load1:{cluster=\"$cluster\", node=\"$node\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Saturation", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Saturation (Load1)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + 
"repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "node:node_memory_utilisation:{cluster=\"$cluster\", node=\"$node\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Memory", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Utilisation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + 
"spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "node:node_memory_swap_io_bytes:sum_rate{cluster=\"$cluster\", node=\"$node\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Swap IO", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Saturation (Swap I/O)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "node:node_disk_utilisation:avg_irate{cluster=\"$cluster\", node=\"$node\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Utilisation", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk IO Utilisation", + "tooltip": { + "shared": false, + "sort": 0, + 
"value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "node:node_disk_saturation:avg_irate{cluster=\"$cluster\", node=\"$node\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Saturation", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk IO Saturation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Disk", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 
10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "node:node_net_utilisation:sum_irate{cluster=\"$cluster\", node=\"$node\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Utilisation", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Net Utilisation (Transmitted)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "node:node_net_saturation:sum_irate{cluster=\"$cluster\", node=\"$node\"}", + "format": "time_series", + "intervalFactor": 2, + 
"legendFormat": "Saturation", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Net Saturation (Dropped)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "Bps", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Net", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "node:node_filesystem_usage:{cluster=\"$cluster\"}\n* on (namespace, pod) group_left (node) node_namespace_pod:kube_pod_info:{cluster=\"$cluster\", node=\"$node\"}\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}device{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk Utilisation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + 
"values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Disk", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(:kube_pod_info_node_count:, cluster)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "node", + "multi": false, + "name": "node", + "options": [ + + ], + "query": "label_values(kube_node_info{cluster=\"$cluster\"}, node)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] 
+ }, + "timezone": "", + "title": "Kubernetes / USE Method / Node", + "uid": "4ac4f123aae0ff6dbaf4f4f66120033b", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-cluster.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-cluster.yaml new file mode 100755 index 000000000..e068214bc --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-cluster.yaml @@ -0,0 +1,1479 @@ +{{- /* +Generated from 'k8s-resources-cluster' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-cluster" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + k8s-resources-cluster.json: |- + { + "annotations": { + "list": [ + + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + 
"links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "height": "100px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 2, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 - avg(rate(node_cpu_seconds_total{mode=\"idle\", cluster=\"$cluster\"}[1m]))", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilisation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "singlestat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 2, + "stack": 
false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}) / sum(node:node_num_cpu:sum{cluster=\"$cluster\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "CPU Requests Commitment", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "singlestat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 2, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\"}) / sum(node:node_num_cpu:sum{cluster=\"$cluster\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "CPU Limits Commitment", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "singlestat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + 
"format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 2, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "1 - sum(:node_memory_MemFreeCachedBuffers_bytes:sum{cluster=\"$cluster\"}) / sum(:node_memory_MemTotal_bytes:sum{cluster=\"$cluster\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "Memory Utilisation", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "singlestat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 5, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as 
zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 2, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}) / sum(:node_memory_MemTotal_bytes:sum{cluster=\"$cluster\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "Memory Requests Commitment", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "singlestat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "format": "percentunit", + "id": 6, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 2, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\"}) / sum(:node_memory_MemTotal_bytes:sum{cluster=\"$cluster\"})", + "format": "time_series", + "instant": true, + "intervalFactor": 2, + "refId": "A" + } + ], + "thresholds": "70,80", + "timeFrom": null, + "timeShift": null, + "title": "Memory Limits Commitment", + "tooltip": { + "shared": 
false, + "sort": 0, + "value_type": "individual" + }, + "type": "singlestat", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Headlines", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 7, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } 
+ ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 8, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Pods", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "link": true, + "linkTooltip": "Drill down to pods", + "linkUrl": "./d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell_1", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "Workloads", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "link": true, + "linkTooltip": "Drill down to workloads", + "linkUrl": "./d/a87fb0d919ec0ea5f6543124e16c42a5/k8s-resources-workloads-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell_1", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": 
"number", + "unit": "short" + }, + { + "alias": "CPU Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "CPU Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #G", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Namespace", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTooltip": "Drill down to pods", + "linkUrl": "./d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell", + "pattern": "namespace", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "count(mixin_pod_workload{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + 
"legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "count(avg(mixin_pod_workload{cluster=\"$cluster\"}) by (workload, namespace)) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\"}) by (namespace) / sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + 
}, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Quota", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", container_name!=\"\"}) by (namespace)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}namespace{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Usage (w/o cache)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + 
"panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Pods", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "link": true, + "linkTooltip": "Drill down to pods", + "linkUrl": "./d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell_1", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "Workloads", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "link": true, + "linkTooltip": "Drill down to workloads", + "linkUrl": "./d/a87fb0d919ec0ea5f6543124e16c42a5/k8s-resources-workloads-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell_1", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "Memory Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + 
"linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #G", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Namespace", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTooltip": "Drill down to pods", + "linkUrl": "./d/85a562078cdf77779eaa1add43ccec1e/k8s-resources-namespace?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$__cell", + "pattern": "namespace", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "count(mixin_pod_workload{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "count(avg(mixin_pod_workload{cluster=\"$cluster\"}) by (workload, namespace)) by (namespace)", + "format": "table", + 
"instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", container_name!=\"\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", container_name!=\"\"}) by (namespace) / sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", container_name!=\"\"}) by (namespace) / sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\"}) by (namespace)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Requests by Namespace", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + 
"repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Requests", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(:kube_pod_info_node_count:, cluster)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Kubernetes / Compute Resources / Cluster", + "uid": "efa86fd1d0c121a26444b636a3f509a8", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-namespace.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-namespace.yaml new file mode 100755 index 000000000..af3664731 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-namespace.yaml @@ -0,0 +1,963 @@ +{{- /* +Generated from 'k8s-resources-namespace' from 
https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-namespace" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + k8s-resources-namespace.json: |- + { + "annotations": { + "list": [ + + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + 
"steppedLine": false, + "targets": [ + { + "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod_name)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod_name{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "CPU Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + 
"type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "CPU Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Pod", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTooltip": "Drill down", + "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", + "pattern": "pod", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}, 
\"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod) / sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + 
"format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Quota", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"\"}) by (pod_name)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod_name{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Usage (w/o cache)", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": 
false, + "datasource": "$datasource", + "fill": 1, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Memory Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + 
"thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Usage (RSS)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Usage (Cache)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #G", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Usage (Swap", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #H", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Pod", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTooltip": "Drill down", + "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", + "pattern": "pod", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(label_replace(container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", 
namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(label_replace(container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod) / sum(kube_pod_container_resource_requests_memory_bytes{namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(label_replace(container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod) / sum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\"}) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(label_replace(container_memory_rss{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "expr": "sum(label_replace(container_memory_cache{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + }, + { + "expr": "sum(label_replace(container_memory_swap{cluster=\"$cluster\", namespace=\"$namespace\",container_name!=\"\"}, \"pod\", \"$1\", \"pod_name\", \"(.*)\")) by (pod)", + "format": "table", + "instant": 
true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "H", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Quota", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(:kube_pod_info_node_count:, cluster)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "namespace", + "multi": false, + "name": "namespace", + "options": [ + + ], + "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "refresh": 1, + "regex": "", + 
"sort": 2, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Kubernetes / Compute Resources / Namespace (Pods)", + "uid": "85a562078cdf77779eaa1add43ccec1e", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-pod.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-pod.yaml new file mode 100755 index 000000000..536a2c704 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-pod.yaml @@ -0,0 +1,1006 @@ +{{- /* +Generated from 'k8s-resources-pod' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-pod" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + k8s-resources-pod.json: |- + { + "annotations": { + "list": [ + + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{namespace=\"$namespace\", 
pod_name=\"$pod\", container_name!=\"POD\", cluster=\"$cluster\"}) by (container_name)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}container_name{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "CPU Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests", + "colorMode": null, + "colors": 
[ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "CPU Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Container", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "container", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"POD\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, 
+ { + "expr": "sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container) / sum(kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(label_replace(namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container) / sum(kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + 
"label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Quota", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(container_memory_rss{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"POD\", container_name!=\"\"}) by (container_name)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}container_name{{`}}`}} (RSS)", + "legendLink": null, + "step": 10 + }, + { + "expr": "sum(container_memory_cache{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"POD\", container_name!=\"\"}) by (container_name)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}container_name{{`}}`}} (Cache)", + "legendLink": null, + "step": 10 + }, + { + "expr": "sum(container_memory_swap{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"POD\", container_name!=\"\"}) by (container_name)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}container_name{{`}}`}} (Swap)", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Usage", + "tooltip": { + "shared": false, + "sort": 0, + 
"value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Memory Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + 
"link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Usage (RSS)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Usage (Cache)", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #G", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Usage (Swap", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #H", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Container", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "container", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": 
"YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(label_replace(container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"POD\", container_name!=\"\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(label_replace(container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container) / sum(kube_pod_container_resource_requests_memory_bytes{namespace=\"$namespace\", pod=\"$pod\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(label_replace(container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name!=\"\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container) / sum(kube_pod_container_resource_limits_memory_bytes{namespace=\"$namespace\", pod=\"$pod\"}) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": 
"sum(label_replace(container_memory_rss{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name != \"\", container_name != \"POD\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + }, + { + "expr": "sum(label_replace(container_memory_cache{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name != \"\", container_name != \"POD\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "G", + "step": 10 + }, + { + "expr": "sum(label_replace(container_memory_swap{cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name != \"\", container_name != \"POD\"}, \"container\", \"$1\", \"container_name\", \"(.*)\")) by (container)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "H", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Quota", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "label": 
null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(:kube_pod_info_node_count:, cluster)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "namespace", + "multi": false, + "name": "namespace", + "options": [ + + ], + "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "pod", + "multi": false, + "name": "pod", + "options": [ + + ], + "query": "label_values(kube_pod_info{cluster=\"$cluster\", namespace=\"$namespace\"}, pod)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Kubernetes / Compute Resources / Pod", + "uid": "6581e46e4e5c7ba40a07646395ef7b23", + "version": 
0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-workload.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-workload.yaml new file mode 100755 index 000000000..f5844b505 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-workload.yaml @@ -0,0 +1,936 @@ +{{- /* +Generated from 'k8s-resources-workload' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-workload" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + k8s-resources-workload.json: |- + { + "annotations": { + "list": [ + + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "height": "250px", + "panels": [ + { + 
"aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\n label_replace(\n namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 2, + "legend": { + 
"avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "CPU Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "CPU Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Pod", + 
"colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTooltip": "Drill down", + "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", + "pattern": "pod", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(\n label_replace(\n namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(\n label_replace(\n namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n 
kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(\n label_replace(\n namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n/sum(\n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + 
"label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Quota", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\n label_replace(\n container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n ) by (pod)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}pod{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": 
false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Memory Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value 
#D", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Pod", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTooltip": "Drill down", + "linkUrl": "./d/6581e46e4e5c7ba40a07646395ef7b23/k8s-resources-pod?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-pod=$__cell", + "pattern": "pod", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "sum(\n label_replace(\n container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n ) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(\n label_replace(\n 
container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n ) by (pod)\n/sum(\n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(\n label_replace(\n container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n ) by (pod)\n/sum(\n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\", workload_type=\"$type\"}\n) by (pod)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + } + ], + "thresholds": [ + + ], + 
"timeFrom": null, + "timeShift": null, + "title": "Memory Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Quota", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(:kube_pod_info_node_count:, cluster)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "namespace", + "multi": false, + "name": "namespace", + "options": [ + + ], + "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, 
+ { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "workload", + "multi": false, + "name": "workload", + "options": [ + + ], + "query": "label_values(mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}, workload)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "type", + "multi": false, + "name": "type", + "options": [ + + ], + "query": "label_values(mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\", workload=\"$workload\"}, workload_type)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Kubernetes / Compute Resources / Workload", + "uid": "a164a7f0339f99e89cea5cb47e9be617", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-workloads-namespace.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-workloads-namespace.yaml new file mode 100755 index 000000000..8a8b5077b --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/k8s-resources-workloads-namespace.yaml @@ -0,0 +1,972 @@ +{{- /* +Generated from 'k8s-resources-workloads-namespace' from 
https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "k8s-resources-workloads-namespace" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + k8s-resources-workloads-namespace.json: |- + { + "annotations": { + "list": [ + + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 1, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + 
"stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\n label_replace(\n namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}workload{{`}}`}} - {{`{{`}}workload_type{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": 
"hidden" + }, + { + "alias": "Running Pods", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "CPU Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "CPU Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Workload", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTooltip": "Drill down", + "linkUrl": 
"./d/a164a7f0339f99e89cea5cb47e9be617/k8s-resources-workload?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-workload=$__cell&var-type=$__cell_2", + "pattern": "workload", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "Workload Type", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "workload_type", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "count(mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}) by (workload, workload_type)", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(\n label_replace(\n namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(\n label_replace(\n 
namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_requests_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(\n label_replace(\n namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate{cluster=\"$cluster\", namespace=\"$namespace\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_limits_cpu_cores{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Quota", + "tooltip": { + "shared": false, + "sort": 0, + 
"value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "CPU Quota", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 10, + "id": 3, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 0, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum(\n label_replace(\n container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n ) by (workload, workload_type)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}workload{{`}}`}} - {{`{{`}}workload_type{{`}}`}}", + "legendLink": null, + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": 
[ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Usage", + "titleSize": "h6" + }, + { + "collapse": false, + "height": "250px", + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "id": 4, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null as zero", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "styles": [ + { + "alias": "Time", + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "pattern": "Time", + "type": "hidden" + }, + { + "alias": "Running Pods", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 0, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #A", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "Memory Usage", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #B", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Requests", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #C", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, 
+ { + "alias": "Memory Requests %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #D", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Memory Limits", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #E", + "thresholds": [ + + ], + "type": "number", + "unit": "bytes" + }, + { + "alias": "Memory Limits %", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "Value #F", + "thresholds": [ + + ], + "type": "number", + "unit": "percentunit" + }, + { + "alias": "Workload", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": true, + "linkTooltip": "Drill down", + "linkUrl": "./d/a164a7f0339f99e89cea5cb47e9be617/k8s-resources-workload?var-datasource=$datasource&var-cluster=$cluster&var-namespace=$namespace&var-workload=$__cell&var-type=$__cell_2", + "pattern": "workload", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "Workload Type", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "link": false, + "linkTooltip": "Drill down", + "linkUrl": "", + "pattern": "workload_type", + "thresholds": [ + + ], + "type": "number", + "unit": "short" + }, + { + "alias": "", + "colorMode": null, + "colors": [ + + ], + "dateFormat": "YYYY-MM-DD HH:mm:ss", + "decimals": 2, + "pattern": "/.*/", + "thresholds": [ + + ], + "type": "string", + "unit": "short" + } + ], + "targets": [ + { + "expr": "count(mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}) by (workload, workload_type)", + "format": "table", + 
"instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 10 + }, + { + "expr": "sum(\n label_replace(\n container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n ) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "B", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "C", + "step": 10 + }, + { + "expr": "sum(\n label_replace(\n container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n ) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_requests_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "D", + "step": 10 + }, + { + "expr": "sum(\n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", + "format": "table", + 
"instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "E", + "step": 10 + }, + { + "expr": "sum(\n label_replace(\n container_memory_usage_bytes{cluster=\"$cluster\", namespace=\"$namespace\", container_name!=\"\"},\n \"pod\", \"$1\", \"pod_name\", \"(.*)\"\n ) * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n ) by (workload, workload_type)\n/sum(\n kube_pod_container_resource_limits_memory_bytes{cluster=\"$cluster\", namespace=\"$namespace\"}\n * on(namespace,pod) group_left(workload, workload_type) mixin_pod_workload{cluster=\"$cluster\", namespace=\"$namespace\"}\n) by (workload, workload_type)\n", + "format": "table", + "instant": true, + "intervalFactor": 2, + "legendFormat": "", + "refId": "F", + "step": 10 + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Quota", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transform": "table", + "type": "table", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Memory Quota", + "titleSize": "h6" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": {{ 
if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(:kube_pod_info_node_count:, cluster)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "namespace", + "multi": false, + "name": "namespace", + "options": [ + + ], + "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Kubernetes / Compute Resources / Namespace (Workloads)", + "uid": "a87fb0d919ec0ea5f6543124e16c42a5", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/nodes.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/nodes.yaml new file mode 100755 index 000000000..17a97dae1 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/nodes.yaml @@ -0,0 +1,1383 @@ +{{- /* +Generated from 'nodes' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "nodes" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + nodes.json: |- + { + "__inputs": [ + + ], + "__requires": [ + + ], + "annotations": { + "list": [ + + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ + + ], + "refresh": "", + "rows": [ + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + + }, + "id": 2, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": 
"max(node_load1{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "load 1m", + "refId": "A" + }, + { + "expr": "max(node_load5{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "load 5m", + "refId": "B" + }, + { + "expr": "max(node_load15{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "load 15m", + "refId": "C" + }, + { + "expr": "count(node_cpu_seconds_total{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\", mode=\"user\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "logical cores", + "refId": "D" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "System load", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + + }, + "id": 3, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": 
false, + "targets": [ + { + "expr": "sum by (cpu) (irate(node_cpu_seconds_total{cluster=\"$cluster\", job=\"node-exporter\", mode!=\"idle\", instance=\"$instance\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}cpu{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Usage Per Core", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + + }, + "id": 4, + "legend": { + "alignAsTable": "true", + "avg": "true", + "current": "true", + "max": "false", + "min": "false", + "rightSide": "true", + "show": "true", + "total": "false", + "values": "true" + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 9, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max (sum by (cpu) (irate(node_cpu_seconds_total{cluster=\"$cluster\", job=\"node-exporter\", mode!=\"idle\", instance=\"$instance\"}[2m])) ) * 100\n", + "format": "time_series", + "intervalFactor": 10, + "legendFormat": "{{`{{`}} cpu {{`}}`}}", + 
"refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Utilization", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percent", + "label": null, + "logBase": 1, + "max": 100, + "min": 0, + "show": true + }, + { + "format": "percent", + "label": null, + "logBase": 1, + "max": 100, + "min": 0, + "show": true + } + ] + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "$datasource", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 5, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "avg(sum by (cpu) (irate(node_cpu_seconds_total{cluster=\"$cluster\", job=\"node-exporter\", mode!=\"idle\", instance=\"$instance\"}[2m]))) * 100\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "80, 90", + "title": "CPU Usage", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + 
"text": "N/A", + "value": "null" + } + ], + "valueName": "current" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + + }, + "id": 6, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 9, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max(\n node_memory_MemTotal_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n - node_memory_MemFree_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n - node_memory_Buffers_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n - node_memory_Cached_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "memory used", + "refId": "A" + }, + { + "expr": "max(node_memory_Buffers_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "memory buffers", + "refId": "B" + }, + { + "expr": "max(node_memory_Cached_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "memory cached", + "refId": "C" + }, + { + "expr": "max(node_memory_MemFree_bytes{cluster=\"$cluster\", 
job=\"node-exporter\", instance=\"$instance\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "memory free", + "refId": "D" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "$datasource", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 7, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "max(\n (\n (\n node_memory_MemTotal_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n - node_memory_MemFree_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n - node_memory_Buffers_bytes{cluster=\"$cluster\", job=\"node-exporter\", 
instance=\"$instance\"}\n - node_memory_Cached_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n )\n / node_memory_MemTotal_bytes{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n ) * 100)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "80, 90", + "title": "Memory Usage", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + + }, + "id": 8, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + { + "alias": "read", + "yaxis": 1 + }, + { + "alias": "io time", + "yaxis": 2 + } + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max(rate(node_disk_read_bytes_total{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}[2m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "read", + "refId": "A" + }, + { + "expr": "max(rate(node_disk_written_bytes_total{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}[2m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "written", + "refId": "B" + 
}, + { + "expr": "max(rate(node_disk_io_time_seconds_total{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}[2m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "io time", + "refId": "C" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk I/O", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + + }, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max by (namespace, pod, device) ((node_filesystem_size_bytes{cluster=\"$cluster\", fstype=~\"ext[234]|btrfs|xfs|zfs\", instance=\"$instance\", job=\"node-exporter\"} - node_filesystem_avail_bytes{cluster=\"$cluster\", fstype=~\"ext[234]|btrfs|xfs|zfs\", instance=\"$instance\", job=\"node-exporter\"}) / node_filesystem_size_bytes{cluster=\"$cluster\", fstype=~\"ext[234]|btrfs|xfs|zfs\", instance=\"$instance\", job=\"node-exporter\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "disk used", + "refId": "A" + }, + { + "expr": "max by 
(namespace, pod, device) (node_filesystem_avail_bytes{cluster=\"$cluster\", fstype=~\"ext[234]|btrfs|xfs|zfs\", instance=\"$instance\", job=\"node-exporter\"} / node_filesystem_size_bytes{cluster=\"$cluster\", fstype=~\"ext[234]|btrfs|xfs|zfs\", instance=\"$instance\", job=\"node-exporter\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "disk free", + "refId": "B" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk Space Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + + }, + "id": 10, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max(rate(node_network_receive_bytes_total{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\", device!~\"lo\"}[5m]))", + "format": 
"time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}device{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Network Received", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + + }, + "id": 11, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max(rate(node_network_transmit_bytes_total{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\", device!~\"lo\"}[5m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{`{{`}}device{{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Network Transmitted", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, 
+ { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + + }, + "id": 12, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 9, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max(\n node_filesystem_files{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n - node_filesystem_files_free{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "inodes used", + "refId": "A" + }, + { + "expr": "max(node_filesystem_files_free{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "inodes free", + "refId": "B" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Inodes Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + 
"label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "$datasource", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 13, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "max(\n (\n (\n node_filesystem_files{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n - node_filesystem_files_free{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n )\n / node_filesystem_files{cluster=\"$cluster\", job=\"node-exporter\", instance=\"$instance\"}\n ) * 100)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "80, 90", + "title": "Inodes Usage", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + 
"kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kube_pod_info, cluster)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "instance", + "options": [ + + ], + "query": "label_values(node_boot_time_seconds{cluster=\"$cluster\", job=\"node-exporter\"}, instance)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Kubernetes / Nodes", + "uid": "fa49a4706d07a042595b664c87fb33ea", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/persistentvolumesusage.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/persistentvolumesusage.yaml new file mode 100755 index 000000000..f6bc2955c --- /dev/null +++ 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/persistentvolumesusage.yaml @@ -0,0 +1,573 @@ +{{- /* +Generated from 'persistentvolumesusage' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "persistentvolumesusage" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + persistentvolumesusage.json: |- + { + "__inputs": [ + + ], + "__requires": [ + + ], + "annotations": { + "list": [ + + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ + + ], + "refresh": "", + "rows": [ + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + + }, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": false, + "show": 
true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 9, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "(\n sum without(instance, node) (kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n -\n sum without(instance, node) (kubelet_volume_stats_available_bytes{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Used Space", + "refId": "A" + }, + { + "expr": "sum without(instance, node) (kubelet_volume_stats_available_bytes{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Free Space", + "refId": "B" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Volume Space Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "$datasource", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": 
true + }, + "gridPos": { + + }, + "id": 3, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "(\n kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n -\n kubelet_volume_stats_available_bytes{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n)\n/\nkubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n* 100\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "80, 90", + "title": "Volume Space Usage", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + + }, + "id": 4, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "total": 
false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 9, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum without(instance, node) (kubelet_volume_stats_inodes_used{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "Used inodes", + "refId": "A" + }, + { + "expr": "(\n sum without(instance, node) (kubelet_volume_stats_inodes{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n -\n sum without(instance, node) (kubelet_volume_stats_inodes_used{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"})\n)\n", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": " Free inodes", + "refId": "B" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Volume inodes Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "$datasource", + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, 
+ "id": 5, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "kubelet_volume_stats_inodes_used{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n/\nkubelet_volume_stats_inodes{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\", persistentvolumeclaim=\"$volume\"}\n* 100\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "80, 90", + "title": "Volume inodes Usage", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": 
false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kubelet_volume_stats_capacity_bytes, cluster)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Namespace", + "multi": false, + "name": "namespace", + "options": [ + + ], + "query": "label_values(kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\"}, namespace)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "PersistentVolumeClaim", + "multi": false, + "name": "volume", + "options": [ + + ], + "query": "label_values(kubelet_volume_stats_capacity_bytes{cluster=\"$cluster\", job=\"kubelet\", namespace=\"$namespace\"}, persistentvolumeclaim)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-7d", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Kubernetes / Persistent Volumes", + "uid": "919b92a8e8041bd567af9edab12c840c", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/pods.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/pods.yaml new file mode 100755 index 000000000..3b1e1539d --- /dev/null +++ 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/pods.yaml @@ -0,0 +1,680 @@ +{{- /* +Generated from 'pods' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "pods" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + pods.json: |- + { + "__inputs": [ + + ], + "__requires": [ + + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "$datasource", + "enable": true, + "expr": "time() == BOOL timestamp(rate(kube_pod_container_status_restarts_total{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}[2m]) > 0)", + "hide": false, + "iconColor": "rgba(215, 44, 44, 1)", + "name": "Restarts", + "showIn": 0, + "tags": [ + "restart" + ], + "type": "rows" + } + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ + + ], + "refresh": "", + "rows": [ + { + "collapse": false, + "collapsed": 
false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + + }, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by(container_name) (container_memory_usage_bytes{job=\"kubelet\", cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\", container_name=~\"$container\", container_name!=\"POD\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Current: {{`{{`}} container_name {{`}}`}}", + "refId": "A" + }, + { + "expr": "sum by(container) (kube_pod_container_resource_requests{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\", pod=\"$pod\", container=~\"$container\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Requested: {{`{{`}} container {{`}}`}}", + "refId": "B" + }, + { + "expr": "sum by(container) (kube_pod_container_resource_limits{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", resource=\"memory\", pod=\"$pod\", container=~\"$container\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Limit: {{`{{`}} container {{`}}`}}", + "refId": "C" + }, + { + "expr": "sum by(container_name) (container_memory_cache{job=\"kubelet\", namespace=\"$namespace\", pod_name=~\"$pod\", container_name=~\"$container\", container_name!=\"POD\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Cache: {{`{{`}} container_name {{`}}`}}", + 
"refId": "D" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Memory Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + + }, + "id": 3, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (container_name) (rate(container_cpu_usage_seconds_total{job=\"kubelet\", cluster=\"$cluster\", namespace=\"$namespace\", image!=\"\", pod_name=\"$pod\", container_name=~\"$container\", container_name!=\"POD\"}[1m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Current: {{`{{`}} container_name {{`}}`}}", + "refId": "A" + }, + { + "expr": "sum by(container) (kube_pod_container_resource_requests{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\", pod=\"$pod\", 
container=~\"$container\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Requested: {{`{{`}} container {{`}}`}}", + "refId": "B" + }, + { + "expr": "sum by(container) (kube_pod_container_resource_limits{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", resource=\"cpu\", pod=\"$pod\", container=~\"$container\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Limit: {{`{{`}} container {{`}}`}}", + "refId": "C" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Usage", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + + }, + "id": 4, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sort_desc(sum by (pod_name) 
(rate(container_network_receive_bytes_total{job=\"kubelet\", cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\"}[1m])))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "RX: {{`{{`}} pod_name {{`}}`}}", + "refId": "A" + }, + { + "expr": "sort_desc(sum by (pod_name) (rate(container_network_transmit_bytes_total{job=\"kubelet\", cluster=\"$cluster\", namespace=\"$namespace\", pod_name=\"$pod\"}[1m])))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "TX: {{`{{`}} pod_name {{`}}`}}", + "refId": "B" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Network I/O", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + + }, + "id": 5, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "span": 12, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max by 
(container) (kube_pod_container_status_restarts_total{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\", container=~\"$container\"})", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "Restarts: {{`{{`}} container {{`}}`}}", + "refId": "A" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Restarts Per Container", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kube_pod_info, cluster)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Namespace", + "multi": false, + 
"name": "namespace", + "options": [ + + ], + "query": "label_values(kube_pod_info{cluster=\"$cluster\"}, namespace)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Pod", + "multi": false, + "name": "pod", + "options": [ + + ], + "query": "label_values(kube_pod_info{cluster=\"$cluster\", namespace=~\"$namespace\"}, pod)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": true, + "label": "Container", + "multi": false, + "name": "container", + "options": [ + + ], + "query": "label_values(kube_pod_container_info{cluster=\"$cluster\", namespace=\"$namespace\", pod=\"$pod\"}, container)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Kubernetes / Pods", + "uid": "ab4f13a9892a76a4d21ce8c2445bf4ea", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/statefulset.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/statefulset.yaml new file mode 100755 index 000000000..01dbf1265 --- /dev/null +++ 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/dashboards/statefulset.yaml @@ -0,0 +1,926 @@ +{{- /* +Generated from 'statefulset' from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/grafana-dashboardDefinitions.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "statefulset" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: + statefulset.json: |- + { + "__inputs": [ + + ], + "__requires": [ + + ], + "annotations": { + "list": [ + + ] + }, + "editable": false, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "id": null, + "links": [ + + ], + "refresh": "", + "rows": [ + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { 
+ + }, + "id": 2, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "cores", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(rate(container_cpu_usage_seconds_total{job=\"kubelet\", cluster=\"$cluster\", namespace=\"$namespace\", pod_name=~\"$statefulset.*\"}[3m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "CPU", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 3, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "GB", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "lineColor": "rgb(31, 120, 
193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(container_memory_usage_bytes{job=\"kubelet\", cluster=\"$cluster\", namespace=\"$namespace\", pod_name=~\"$statefulset.*\"}) / 1024^3", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Memory", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 4, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "Bps", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 4, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(rate(container_network_transmit_bytes_total{job=\"kubelet\", cluster=\"$cluster\", namespace=\"$namespace\", pod_name=~\"$statefulset.*\"}[3m])) + sum(rate(container_network_receive_bytes_total{cluster=\"$cluster\", namespace=\"$namespace\",pod_name=~\"$statefulset.*\"}[3m]))", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Network", + "tooltip": { + "shared": false + }, + "type": 
"singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "height": "100px", + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 5, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "max(kube_statefulset_replicas{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", statefulset=\"$statefulset\"}) without (instance, pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Desired Replicas", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + 
"rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 6, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "min(kube_statefulset_status_replicas_current{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", statefulset=\"$statefulset\"}) without (instance, pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Replicas of current version", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 7, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": 
"connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "max(kube_statefulset_status_observed_generation{job=\"kube-state-metrics\", cluster=\"$cluster\", namespace=\"$namespace\", statefulset=\"$statefulset\"}) without (instance, pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Observed Generation", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "$datasource", + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + + }, + "id": 8, + "interval": null, + "links": [ + + ], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "span": 3, + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "max(kube_statefulset_metadata_generation{job=\"kube-state-metrics\", 
statefulset=\"$statefulset\", cluster=\"$cluster\", namespace=\"$namespace\"}) without (instance, pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Metadata Generation", + "tooltip": { + "shared": false + }, + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "0", + "value": "null" + } + ], + "valueName": "current" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "aliasColors": { + + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fill": 1, + "gridPos": { + + }, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [ + + ], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "repeat": null, + "seriesOverrides": [ + + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max(kube_statefulset_replicas{job=\"kube-state-metrics\", statefulset=\"$statefulset\", cluster=\"$cluster\", namespace=\"$namespace\"}) without (instance, pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "replicas specified", + "refId": "A" + }, + { + "expr": "max(kube_statefulset_status_replicas{job=\"kube-state-metrics\", statefulset=\"$statefulset\", cluster=\"$cluster\", namespace=\"$namespace\"}) without (instance, pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "replicas created", + "refId": "B" + }, + { + "expr": "min(kube_statefulset_status_replicas_ready{job=\"kube-state-metrics\", 
statefulset=\"$statefulset\", cluster=\"$cluster\", namespace=\"$namespace\"}) without (instance, pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "ready", + "refId": "C" + }, + { + "expr": "min(kube_statefulset_status_replicas_current{job=\"kube-state-metrics\", statefulset=\"$statefulset\", cluster=\"$cluster\", namespace=\"$namespace\"}) without (instance, pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "replicas of current version", + "refId": "D" + }, + { + "expr": "min(kube_statefulset_status_replicas_updated{job=\"kube-state-metrics\", statefulset=\"$statefulset\", cluster=\"$cluster\", namespace=\"$namespace\"}) without (instance, pod)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "updated", + "refId": "E" + } + ], + "thresholds": [ + + ], + "timeFrom": null, + "timeShift": null, + "title": "Replicas", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [ + + ] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": false, + "title": "Dashboard Row", + "titleSize": "h6", + "type": "row" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "kubernetes-mixin" + ], + "templating": { + "list": [ + { + "current": { + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "label": null, + "name": "datasource", + "options": [ + + ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": {{ if .Values.grafana.sidecar.dashboards.multicluster }}0{{ 
else }}2{{ end }}, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ + + ], + "query": "label_values(kube_statefulset_metadata_generation, cluster)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Namespace", + "multi": false, + "name": "namespace", + "options": [ + + ], + "query": "label_values(kube_statefulset_metadata_generation{job=\"kube-state-metrics\"}, namespace)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "Name", + "multi": false, + "name": "statefulset", + "options": [ + + ], + "query": "label_values(kube_statefulset_metadata_generation{job=\"kube-state-metrics\", namespace=\"$namespace\"}, statefulset)", + "refresh": 2, + "regex": "", + "sort": 0, + "tagValuesQuery": "", + "tags": [ + + ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Kubernetes / StatefulSets", + "uid": "a31c1f46e6f727cb37c0d731a7245005", + "version": 0 + } +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/namespaces.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/namespaces.yaml new file mode 100755 index 000000000..39ed210ed --- /dev/null +++ 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/namespaces.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled (not .Values.grafana.defaultDashboards.useExistingNamespace) }} +apiVersion: v1 +kind: Namespace +metadata: + name: {{ .Values.grafana.defaultDashboards.namespace }} + labels: + name: {{ .Values.grafana.defaultDashboards.namespace }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} + annotations: +{{- if not .Values.grafana.defaultDashboards.cleanupOnUninstall }} + helm.sh/resource-policy: "keep" +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/servicemonitor.yaml new file mode 100755 index 000000000..1e839d707 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/grafana/servicemonitor.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.grafana.enabled .Values.grafana.serviceMonitor.selfMonitor }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-grafana + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-grafana +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: grafana + app.kubernetes.io/instance: {{ $.Release.Name | quote }} + namespaceSelector: + matchNames: + - {{ printf "%s" (include "kube-prometheus-stack.namespace" .) 
| quote }} + endpoints: + - port: {{ .Values.grafana.service.portName }} + {{- if .Values.grafana.serviceMonitor.interval }} + interval: {{ .Values.grafana.serviceMonitor.interval }} + {{- end }} + path: {{ .Values.grafana.serviceMonitor.path | quote }} +{{- if .Values.grafana.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.grafana.serviceMonitor.metricRelabelings | indent 6) . }} +{{- end }} +{{- if .Values.grafana.serviceMonitor.relabelings }} + relabelings: +{{ toYaml .Values.grafana.serviceMonitor.relabelings | indent 6 }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/clusterrole.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/clusterrole.yaml new file mode 100755 index 000000000..249af770a --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/clusterrole.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled .Values.global.rbac.create (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "kube-prometheus-stack.fullname" . 
}}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "kube-prometheus-stack.name" $ }}-admission +{{- include "kube-prometheus-stack.labels" $ | indent 4 }} +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + - mutatingwebhookconfigurations + verbs: + - get + - update +{{- if .Values.global.rbac.pspEnabled }} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} + - apiGroups: ['policy'] +{{- else }} + - apiGroups: ['extensions'] +{{- end }} + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "kube-prometheus-stack.fullname" . }}-admission +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/clusterrolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/clusterrolebinding.yaml new file mode 100755 index 000000000..31fd2def0 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled .Values.global.rbac.create (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "kube-prometheus-stack.fullname" . 
}}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "kube-prometheus-stack.name" $ }}-admission +{{- include "kube-prometheus-stack.labels" $ | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "kube-prometheus-stack.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ template "kube-prometheus-stack.fullname" . }}-admission + namespace: {{ template "kube-prometheus-stack.namespace" . }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/job-createSecret.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/job-createSecret.yaml new file mode 100755 index 000000000..e86610cb7 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/job-createSecret.yaml @@ -0,0 +1,65 @@ +{{- if and .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-admission-create + namespace: {{ template "kube-prometheus-stack.namespace" . }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "kube-prometheus-stack.name" $ }}-admission-create +{{- include "kube-prometheus-stack.labels" $ | indent 4 }} +spec: + {{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 + {{- end }} + template: + metadata: + name: {{ template "kube-prometheus-stack.fullname" . 
}}-admission-create +{{- with .Values.prometheusOperator.admissionWebhooks.patch.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + labels: + app: {{ template "kube-prometheus-stack.name" $ }}-admission-create +{{- include "kube-prometheus-stack.labels" $ | indent 8 }} + spec: + {{- if .Values.prometheusOperator.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.prometheusOperator.admissionWebhooks.patch.priorityClassName }} + {{- end }} + containers: + - name: create + {{- if .Values.prometheusOperator.admissionWebhooks.patch.image.sha }} + image: {{ template "system_default_registry" . }}{{ .Values.prometheusOperator.admissionWebhooks.patch.image.repository }}:{{ .Values.prometheusOperator.admissionWebhooks.patch.image.tag }}@sha256:{{ .Values.prometheusOperator.admissionWebhooks.patch.image.sha }} + {{- else }} + image: {{ template "system_default_registry" . }}{{ .Values.prometheusOperator.admissionWebhooks.patch.image.repository }}:{{ .Values.prometheusOperator.admissionWebhooks.patch.image.tag }} + {{- end }} + imagePullPolicy: {{ .Values.prometheusOperator.admissionWebhooks.patch.image.pullPolicy }} + args: + - create + - --host={{ template "kube-prometheus-stack.operator.fullname" . }},{{ template "kube-prometheus-stack.operator.fullname" . }}.{{ template "kube-prometheus-stack.namespace" . }}.svc + - --namespace={{ template "kube-prometheus-stack.namespace" . }} + - --secret-name={{ template "kube-prometheus-stack.fullname" . }}-admission + resources: +{{ toYaml .Values.prometheusOperator.admissionWebhooks.patch.resources | indent 12 }} + restartPolicy: OnFailure + serviceAccountName: {{ template "kube-prometheus-stack.fullname" . }}-admission + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- with .Values.prometheusOperator.admissionWebhooks.patch.nodeSelector }} +{{ toYaml . 
| indent 8 }} +{{- end }} + {{- with .Values.prometheusOperator.admissionWebhooks.patch.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- with .Values.prometheusOperator.admissionWebhooks.patch.tolerations }} +{{ toYaml . | indent 8 }} +{{- end }} + securityContext: + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/job-patchWebhook.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/job-patchWebhook.yaml new file mode 100755 index 000000000..c2742073f --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/job-patchWebhook.yaml @@ -0,0 +1,66 @@ +{{- if and .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-admission-patch + namespace: {{ template "kube-prometheus-stack.namespace" . }} + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "kube-prometheus-stack.name" $ }}-admission-patch +{{- include "kube-prometheus-stack.labels" $ | indent 4 }} +spec: + {{- if .Capabilities.APIVersions.Has "batch/v1alpha1" }} + # Alpha feature since k8s 1.12 + ttlSecondsAfterFinished: 0 + {{- end }} + template: + metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-admission-patch +{{- with .Values.prometheusOperator.admissionWebhooks.patch.podAnnotations }} + annotations: +{{ toYaml . 
| indent 8 }} +{{- end }} + labels: + app: {{ template "kube-prometheus-stack.name" $ }}-admission-patch +{{- include "kube-prometheus-stack.labels" $ | indent 8 }} + spec: + {{- if .Values.prometheusOperator.admissionWebhooks.patch.priorityClassName }} + priorityClassName: {{ .Values.prometheusOperator.admissionWebhooks.patch.priorityClassName }} + {{- end }} + containers: + - name: patch + {{- if .Values.prometheusOperator.admissionWebhooks.patch.image.sha }} + image: {{ template "system_default_registry" . }}{{ .Values.prometheusOperator.admissionWebhooks.patch.image.repository }}:{{ .Values.prometheusOperator.admissionWebhooks.patch.image.tag }}@sha256:{{ .Values.prometheusOperator.admissionWebhooks.patch.image.sha }} + {{- else }} + image: {{ template "system_default_registry" . }}{{ .Values.prometheusOperator.admissionWebhooks.patch.image.repository }}:{{ .Values.prometheusOperator.admissionWebhooks.patch.image.tag }} + {{- end }} + imagePullPolicy: {{ .Values.prometheusOperator.admissionWebhooks.patch.image.pullPolicy }} + args: + - patch + - --webhook-name={{ template "kube-prometheus-stack.fullname" . }}-admission + - --namespace={{ template "kube-prometheus-stack.namespace" . }} + - --secret-name={{ template "kube-prometheus-stack.fullname" . }}-admission + - --patch-failure-policy={{ .Values.prometheusOperator.admissionWebhooks.failurePolicy }} + resources: +{{ toYaml .Values.prometheusOperator.admissionWebhooks.patch.resources | indent 12 }} + restartPolicy: OnFailure + serviceAccountName: {{ template "kube-prometheus-stack.fullname" . }}-admission + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- with .Values.prometheusOperator.admissionWebhooks.patch.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + {{- with .Values.prometheusOperator.admissionWebhooks.patch.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} +{{- with .Values.prometheusOperator.admissionWebhooks.patch.tolerations }} +{{ toYaml . | indent 8 }} +{{- end }} + securityContext: + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/psp.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/psp.yaml new file mode 100755 index 000000000..5834c483c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/psp.yaml @@ -0,0 +1,54 @@ +{{- if and .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-admission + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "kube-prometheus-stack.name" . }}-admission +{{- if .Values.global.rbac.pspAnnotations }} + annotations: +{{ toYaml .Values.global.rbac.pspAnnotations | indent 4 }} +{{- end }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + privileged: false + # Required to prevent escalations to root. + # allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + #requiredDropCapabilities: + # - ALL + # Allow core volume types. 
+ volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Permits the container to run with root privileges as well. + rule: 'RunAsAny' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 0 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/role.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/role.yaml new file mode 100755 index 000000000..d229f76ef --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/role.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled .Values.global.rbac.create (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-admission + namespace: {{ template "kube-prometheus-stack.namespace" . 
}} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "kube-prometheus-stack.name" $ }}-admission +{{- include "kube-prometheus-stack.labels" $ | indent 4 }} +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/rolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/rolebinding.yaml new file mode 100755 index 000000000..f4b1fbf0e --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/rolebinding.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled .Values.global.rbac.create (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-admission + namespace: {{ template "kube-prometheus-stack.namespace" . }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "kube-prometheus-stack.name" $ }}-admission +{{- include "kube-prometheus-stack.labels" $ | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "kube-prometheus-stack.fullname" . }}-admission +subjects: + - kind: ServiceAccount + name: {{ template "kube-prometheus-stack.fullname" . }}-admission + namespace: {{ template "kube-prometheus-stack.namespace" . 
}} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/serviceaccount.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/serviceaccount.yaml new file mode 100755 index 000000000..2048f049c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/job-patch/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.prometheusOperator.admissionWebhooks.enabled .Values.prometheusOperator.admissionWebhooks.patch.enabled .Values.global.rbac.create (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-admission + namespace: {{ template "kube-prometheus-stack.namespace" . }} + annotations: + "helm.sh/hook": pre-install,pre-upgrade,post-install,post-upgrade + "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded + labels: + app: {{ template "kube-prometheus-stack.name" $ }}-admission +{{- include "kube-prometheus-stack.labels" $ | indent 4 }} +imagePullSecrets: +{{ toYaml .Values.global.imagePullSecrets | indent 2 }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml new file mode 100755 index 000000000..b67df54bf --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/mutatingWebhookConfiguration.yaml @@ -0,0 +1,41 @@ +{{- if and .Values.prometheusOperator.admissionWebhooks.enabled }} +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: {{ template 
"kube-prometheus-stack.fullname" . }}-admission +{{- if .Values.prometheusOperator.admissionWebhooks.certManager.enabled }} + annotations: + certmanager.k8s.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "kube-prometheus-stack.fullname" .) | quote }} + cert-manager.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "kube-prometheus-stack.fullname" .) | quote }} +{{- end }} + labels: + app: {{ template "kube-prometheus-stack.name" $ }}-admission +{{- include "kube-prometheus-stack.labels" $ | indent 4 }} +webhooks: + - name: prometheusrulemutate.monitoring.coreos.com + {{- if .Values.prometheusOperator.admissionWebhooks.patch.enabled }} + failurePolicy: Ignore + {{- else }} + failurePolicy: {{ .Values.prometheusOperator.admissionWebhooks.failurePolicy }} + {{- end }} + rules: + - apiGroups: + - monitoring.coreos.com + apiVersions: + - "*" + resources: + - prometheusrules + operations: + - CREATE + - UPDATE + clientConfig: + service: + namespace: {{ template "kube-prometheus-stack.namespace" . 
}} + name: {{ template "kube-prometheus-stack.operator.fullname" $ }} + path: /admission-prometheusrules/mutate + {{- if and .Values.prometheusOperator.admissionWebhooks.caBundle (not .Values.prometheusOperator.admissionWebhooks.patch.enabled) (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} + caBundle: {{ .Values.prometheusOperator.admissionWebhooks.caBundle }} + {{- end }} + admissionReviewVersions: ["v1", "v1beta1"] + sideEffects: None +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml new file mode 100755 index 000000000..249488e41 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/admission-webhooks/validatingWebhookConfiguration.yaml @@ -0,0 +1,41 @@ +{{- if and .Values.prometheusOperator.admissionWebhooks.enabled }} +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-admission +{{- if .Values.prometheusOperator.admissionWebhooks.certManager.enabled }} + annotations: + certmanager.k8s.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "kube-prometheus-stack.fullname" .) | quote }} + cert-manager.io/inject-ca-from: {{ printf "%s/%s-root-cert" .Release.Namespace (include "kube-prometheus-stack.fullname" .) 
| quote }} +{{- end }} + labels: + app: {{ template "kube-prometheus-stack.name" $ }}-admission +{{- include "kube-prometheus-stack.labels" $ | indent 4 }} +webhooks: + - name: prometheusrulemutate.monitoring.coreos.com + {{- if .Values.prometheusOperator.admissionWebhooks.patch.enabled }} + failurePolicy: Ignore + {{- else }} + failurePolicy: {{ .Values.prometheusOperator.admissionWebhooks.failurePolicy }} + {{- end }} + rules: + - apiGroups: + - monitoring.coreos.com + apiVersions: + - "*" + resources: + - prometheusrules + operations: + - CREATE + - UPDATE + clientConfig: + service: + namespace: {{ template "kube-prometheus-stack.namespace" . }} + name: {{ template "kube-prometheus-stack.operator.fullname" $ }} + path: /admission-prometheusrules/validate + {{- if and .Values.prometheusOperator.admissionWebhooks.caBundle (not .Values.prometheusOperator.admissionWebhooks.patch.enabled) (not .Values.prometheusOperator.admissionWebhooks.certManager.enabled) }} + caBundle: {{ .Values.prometheusOperator.admissionWebhooks.caBundle }} + {{- end }} + admissionReviewVersions: ["v1", "v1beta1"] + sideEffects: None +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/certmanager.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/certmanager.yaml new file mode 100755 index 000000000..090e6a5bb --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/certmanager.yaml @@ -0,0 +1,57 @@ +{{- if .Values.prometheusOperator.admissionWebhooks.certManager.enabled -}} +{{- if not .Values.prometheusOperator.admissionWebhooks.certManager.issuerRef -}} +# Create a selfsigned Issuer, in order to create a root CA certificate for +# signing webhook serving certificates +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ template "kube-prometheus-stack.fullname" . 
}}-self-signed-issuer + namespace: {{ template "kube-prometheus-stack.namespace" . }} +spec: + selfSigned: {} +--- +# Generate a CA Certificate used to sign certificates for the webhook +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-root-cert + namespace: {{ template "kube-prometheus-stack.namespace" . }} +spec: + secretName: {{ template "kube-prometheus-stack.fullname" . }}-root-cert + duration: 43800h # 5y + issuerRef: + name: {{ template "kube-prometheus-stack.fullname" . }}-self-signed-issuer + commonName: "ca.webhook.kube-prometheus-stack" + isCA: true +--- +# Create an Issuer that uses the above generated CA certificate to issue certs +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-root-issuer + namespace: {{ template "kube-prometheus-stack.namespace" . }} +spec: + ca: + secretName: {{ template "kube-prometheus-stack.fullname" . }}-root-cert +{{- end }} +--- +# generate a serving certificate for the apiservices to use +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-admission + namespace: {{ template "kube-prometheus-stack.namespace" . }} +spec: + secretName: {{ template "kube-prometheus-stack.fullname" . }}-admission + duration: 8760h # 1y + issuerRef: + {{- if .Values.prometheusOperator.admissionWebhooks.certManager.issuerRef }} + {{- toYaml .Values.prometheusOperator.admissionWebhooks.certManager.issuerRef | nindent 4 }} + {{- else }} + name: {{ template "kube-prometheus-stack.fullname" . }}-root-issuer + {{- end }} + dnsNames: + - {{ template "kube-prometheus-stack.operator.fullname" . }} + - {{ template "kube-prometheus-stack.operator.fullname" . }}.{{ template "kube-prometheus-stack.namespace" . }} + - {{ template "kube-prometheus-stack.operator.fullname" . }}.{{ template "kube-prometheus-stack.namespace" . 
}}.svc +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/clusterrole.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/clusterrole.yaml new file mode 100755 index 000000000..e5568534c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/clusterrole.yaml @@ -0,0 +1,80 @@ +{{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-operator + labels: + app: {{ template "kube-prometheus-stack.name" . }}-operator +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +rules: +- apiGroups: + - monitoring.coreos.com + resources: + - alertmanagers + - alertmanagers/finalizers + - alertmanagerconfigs + - prometheuses + - prometheuses/finalizers + - thanosrulers + - thanosrulers/finalizers + - servicemonitors + - podmonitors + - probes + - prometheusrules + verbs: + - '*' +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - '*' +- apiGroups: + - "" + resources: + - configmaps + - secrets + verbs: + - '*' +- apiGroups: + - "" + resources: + - pods + verbs: + - list + - delete +- apiGroups: + - "" + resources: + - services + - services/finalizers + - endpoints + verbs: + - get + - create + - update + - delete +- apiGroups: + - "" + resources: + - nodes + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/clusterrolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/clusterrolebinding.yaml new file mode 100755 index 000000000..c9ab0ab87 --- /dev/null +++ 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/clusterrolebinding.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-operator + labels: + app: {{ template "kube-prometheus-stack.name" . }}-operator +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "kube-prometheus-stack.fullname" . }}-operator +subjects: +- kind: ServiceAccount + name: {{ template "kube-prometheus-stack.operator.serviceAccountName" . }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/deployment.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/deployment.yaml new file mode 100755 index 000000000..6e72acfa0 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/deployment.yaml @@ -0,0 +1,145 @@ +{{- $namespace := printf "%s" (include "kube-prometheus-stack.namespace" .) }} +{{- if .Values.prometheusOperator.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-operator + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-operator +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + replicas: 1 + selector: + matchLabels: + app: {{ template "kube-prometheus-stack.name" . }}-operator + release: {{ $.Release.Name | quote }} + template: + metadata: + labels: + app: {{ template "kube-prometheus-stack.name" . }}-operator +{{ include "kube-prometheus-stack.labels" . 
| indent 8 }} +{{- if .Values.prometheusOperator.podLabels }} +{{ toYaml .Values.prometheusOperator.podLabels | indent 8 }} +{{- end }} +{{- if .Values.prometheusOperator.podAnnotations }} + annotations: +{{ toYaml .Values.prometheusOperator.podAnnotations | indent 8 }} +{{- end }} + spec: + {{- if .Values.prometheusOperator.priorityClassName }} + priorityClassName: {{ .Values.prometheusOperator.priorityClassName }} + {{- end }} + containers: + - name: {{ template "kube-prometheus-stack.name" . }} + {{- if .Values.prometheusOperator.image.sha }} + image: "{{ template "system_default_registry" . }}{{ .Values.prometheusOperator.image.repository }}:{{ .Values.prometheusOperator.image.tag }}@sha256:{{ .Values.prometheusOperator.image.sha }}" + {{- else }} + image: "{{ template "system_default_registry" . }}{{ .Values.prometheusOperator.image.repository }}:{{ .Values.prometheusOperator.image.tag }}" + {{- end }} + imagePullPolicy: "{{ .Values.prometheusOperator.image.pullPolicy }}" + args: + {{- if .Values.prometheusOperator.kubeletService.enabled }} + - --kubelet-service={{ .Values.prometheusOperator.kubeletService.namespace }}/{{ template "kube-prometheus-stack.fullname" . 
}}-kubelet + {{- end }} + {{- if .Values.prometheusOperator.logFormat }} + - --log-format={{ .Values.prometheusOperator.logFormat }} + {{- end }} + {{- if .Values.prometheusOperator.logLevel }} + - --log-level={{ .Values.prometheusOperator.logLevel }} + {{- end }} + {{- if .Values.prometheusOperator.denyNamespaces }} + - --deny-namespaces={{ .Values.prometheusOperator.denyNamespaces | join "," }} + {{- end }} + {{- with $.Values.prometheusOperator.namespaces }} + {{ $ns := .additional }} + {{- if .releaseNamespace }} + {{- $ns = append $ns $namespace }} + {{- end }} + - --namespaces={{ $ns | join "," }} + {{- end }} + - --localhost=127.0.0.1 + {{- if .Values.prometheusOperator.prometheusDefaultBaseImage }} + - --prometheus-default-base-image={{ .Values.prometheusOperator.prometheusDefaultBaseImage }} + {{- end }} + {{- if .Values.prometheusOperator.alertmanagerDefaultBaseImage }} + - --alertmanager-default-base-image={{ .Values.prometheusOperator.alertmanagerDefaultBaseImage }} + {{- end }} + {{- if .Values.prometheusOperator.prometheusConfigReloaderImage.sha }} + - --prometheus-config-reloader={{ template "system_default_registry" . }}{{ .Values.prometheusOperator.prometheusConfigReloaderImage.repository }}:{{ .Values.prometheusOperator.prometheusConfigReloaderImage.tag }}@sha256:{{ .Values.prometheusOperator.prometheusConfigReloaderImage.sha }} + {{- else }} + - --prometheus-config-reloader={{ template "system_default_registry" . 
}}{{ .Values.prometheusOperator.prometheusConfigReloaderImage.repository }}:{{ .Values.prometheusOperator.prometheusConfigReloaderImage.tag }} + {{- end }} + - --config-reloader-cpu={{ .Values.prometheusOperator.configReloaderCpu }} + - --config-reloader-memory={{ .Values.prometheusOperator.configReloaderMemory }} + {{- if .Values.prometheusOperator.alertmanagerInstanceNamespaces }} + - --alertmanager-instance-namespaces={{ .Values.prometheusOperator.alertmanagerInstanceNamespaces | join "," }} + {{- end }} + {{- if .Values.prometheusOperator.prometheusInstanceNamespaces }} + - --prometheus-instance-namespaces={{ .Values.prometheusOperator.prometheusInstanceNamespaces | join "," }} + {{- end }} + {{- if .Values.prometheusOperator.thanosRulerInstanceNamespaces }} + - --thanos-ruler-instance-namespaces={{ .Values.prometheusOperator.thanosRulerInstanceNamespaces | join "," }} + {{- end }} + {{- if .Values.prometheusOperator.secretFieldSelector }} + - --secret-field-selector={{ .Values.prometheusOperator.secretFieldSelector }} + {{- end }} + {{- if .Values.prometheusOperator.clusterDomain }} + - --cluster-domain={{ .Values.prometheusOperator.clusterDomain }} + {{- end }} + {{- if .Values.prometheusOperator.tls.enabled }} + - --web.enable-tls=true + - --web.cert-file=/cert/{{ if .Values.prometheusOperator.admissionWebhooks.certManager.enabled }}tls.crt{{ else }}cert{{ end }} + - --web.key-file=/cert/{{ if .Values.prometheusOperator.admissionWebhooks.certManager.enabled }}tls.key{{ else }}key{{ end }} + - --web.listen-address=:{{ .Values.prometheusOperator.tls.internalPort }} + - --web.tls-min-version={{ .Values.prometheusOperator.tls.tlsMinVersion }} + ports: + - containerPort: {{ .Values.prometheusOperator.tls.internalPort }} + name: https + {{- else }} + ports: + - containerPort: 8080 + name: http + {{- end }} + resources: +{{ toYaml .Values.prometheusOperator.resources | indent 12 }} + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true 
+{{- if .Values.prometheusOperator.tls.enabled }} + volumeMounts: + - name: tls-secret + mountPath: /cert + readOnly: true + volumes: + - name: tls-secret + secret: + defaultMode: 420 + secretName: {{ template "kube-prometheus-stack.fullname" . }}-admission +{{- end }} + {{- with .Values.prometheusOperator.dnsConfig }} + dnsConfig: +{{ toYaml . | indent 8 }} + {{- end }} +{{- if .Values.prometheusOperator.securityContext }} + securityContext: +{{ toYaml .Values.prometheusOperator.securityContext | indent 8 }} +{{- end }} + serviceAccountName: {{ template "kube-prometheus-stack.operator.serviceAccountName" . }} +{{- if .Values.prometheusOperator.hostNetwork }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet +{{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} +{{- with .Values.prometheusOperator.nodeSelector }} +{{ toYaml . | indent 8 }} +{{- end }} + {{- with .Values.prometheusOperator.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} +{{- with .Values.prometheusOperator.tolerations }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/psp-clusterrole.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/psp-clusterrole.yaml new file mode 100755 index 000000000..d667d6275 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/psp-clusterrole.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-operator-psp + labels: + app: {{ template "kube-prometheus-stack.name" . }}-operator +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +rules: +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} +- apiGroups: ['policy'] +{{- else }} +- apiGroups: ['extensions'] +{{- end }} + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "kube-prometheus-stack.fullname" . }}-operator +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/psp-clusterrolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/psp-clusterrolebinding.yaml new file mode 100755 index 000000000..c538cd173 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/psp-clusterrolebinding.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-operator-psp + labels: + app: {{ template "kube-prometheus-stack.name" . }}-operator +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "kube-prometheus-stack.fullname" . }}-operator-psp +subjects: + - kind: ServiceAccount + name: {{ template "kube-prometheus-stack.operator.serviceAccountName" . }} + namespace: {{ template "kube-prometheus-stack.namespace" . 
}} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/psp.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/psp.yaml new file mode 100755 index 000000000..18d1d37df --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/psp.yaml @@ -0,0 +1,51 @@ +{{- if and .Values.prometheusOperator.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-operator + labels: + app: {{ template "kube-prometheus-stack.name" . }}-operator +{{- if .Values.global.rbac.pspAnnotations }} + annotations: +{{ toYaml .Values.global.rbac.pspAnnotations | indent 4 }} +{{- end }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + privileged: false + # Required to prevent escalations to root. + # allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + #requiredDropCapabilities: + # - ALL + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: {{ .Values.prometheusOperator.hostNetwork }} + hostIPC: false + hostPID: false + runAsUser: + # Permits the container to run with root privileges as well. + rule: 'RunAsAny' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 0 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/service.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/service.yaml new file mode 100755 index 000000000..8ccb2bb2d --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/service.yaml @@ -0,0 +1,55 @@ +{{- if .Values.prometheusOperator.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-operator + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-operator +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.prometheusOperator.service.labels }} +{{ toYaml .Values.prometheusOperator.service.labels | indent 4 }} +{{- end }} +{{- if .Values.prometheusOperator.service.annotations }} + annotations: +{{ toYaml .Values.prometheusOperator.service.annotations | indent 4 }} +{{- end }} +spec: +{{- if .Values.prometheusOperator.service.clusterIP }} + clusterIP: {{ .Values.prometheusOperator.service.clusterIP }} +{{- end }} +{{- if .Values.prometheusOperator.service.externalIPs }} + externalIPs: +{{ toYaml .Values.prometheusOperator.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.prometheusOperator.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.prometheusOperator.service.loadBalancerIP }} +{{- end }} +{{- if .Values.prometheusOperator.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.prometheusOperator.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + {{- if not .Values.prometheusOperator.tls.enabled }} + - name: http + {{- if eq .Values.prometheusOperator.service.type "NodePort" }} + nodePort: {{ .Values.prometheusOperator.service.nodePort }} + {{- end }} + port: 8080 + 
targetPort: http + {{- end }} + {{- if .Values.prometheusOperator.tls.enabled }} + - name: https + {{- if eq .Values.prometheusOperator.service.type "NodePort"}} + nodePort: {{ .Values.prometheusOperator.service.nodePortTls }} + {{- end }} + port: 443 + targetPort: https + {{- end }} + selector: + app: {{ template "kube-prometheus-stack.name" . }}-operator + release: {{ $.Release.Name | quote }} + type: "{{ .Values.prometheusOperator.service.type }}" +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/serviceaccount.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/serviceaccount.yaml new file mode 100755 index 000000000..ab41797e3 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kube-prometheus-stack.operator.serviceAccountName" . }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-operator +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +imagePullSecrets: +{{ toYaml .Values.global.imagePullSecrets | indent 2 }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/servicemonitor.yaml new file mode 100755 index 000000000..b7bd952bb --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus-operator/servicemonitor.yaml @@ -0,0 +1,44 @@ +{{- if and .Values.prometheusOperator.enabled .Values.prometheusOperator.serviceMonitor.selfMonitor }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-operator + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-operator +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + endpoints: + {{- if .Values.prometheusOperator.tls.enabled }} + - port: https + scheme: https + tlsConfig: + serverName: {{ template "kube-prometheus-stack.operator.fullname" . }} + ca: + secret: + name: {{ template "kube-prometheus-stack.fullname" . }}-admission + key: {{ if .Values.prometheusOperator.admissionWebhooks.certManager.enabled }}ca.crt{{ else }}ca{{ end }} + optional: false + {{- else }} + - port: http + {{- end }} + honorLabels: true + {{- if .Values.prometheusOperator.serviceMonitor.interval }} + interval: {{ .Values.prometheusOperator.serviceMonitor.interval }} + {{- end }} +{{- if .Values.prometheusOperator.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.prometheusOperator.serviceMonitor.metricRelabelings | indent 6) . 
}} +{{- end }} +{{- if .Values.prometheusOperator.serviceMonitor.relabelings }} + relabelings: +{{ toYaml .Values.prometheusOperator.serviceMonitor.relabelings | indent 6 }} +{{- end }} + selector: + matchLabels: + app: {{ template "kube-prometheus-stack.name" . }}-operator + release: {{ $.Release.Name | quote }} + namespaceSelector: + matchNames: + - {{ printf "%s" (include "kube-prometheus-stack.namespace" .) | quote }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/_rules.tpl b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/_rules.tpl new file mode 100755 index 000000000..83245c089 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/_rules.tpl @@ -0,0 +1,38 @@ +{{- /* +Generated file. Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- define "rules.names" }} +rules: + - "alertmanager.rules" + - "general.rules" + - "k8s.rules" + - "kube-apiserver.rules" + - "kube-apiserver-availability.rules" + - "kube-apiserver-error" + - "kube-apiserver-slos" + - "kube-prometheus-general.rules" + - "kube-prometheus-node-alerting.rules" + - "kube-prometheus-node-recording.rules" + - "kube-scheduler.rules" + - "kube-state-metrics" + - "kubelet.rules" + - "kubernetes-absent" + - "kubernetes-resources" + - "kubernetes-storage" + - "kubernetes-system" + - "kubernetes-system-apiserver" + - "kubernetes-system-kubelet" + - "kubernetes-system-controller-manager" + - "kubernetes-system-scheduler" + - "node-exporter.rules" + - "node-exporter" + - "node.rules" + - "node-network" + - "node-time" + - "prometheus-operator" + - "prometheus.rules" + - "prometheus" + - "kubernetes-apps" + - "etcd" +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/additionalAlertRelabelConfigs.yaml 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/additionalAlertRelabelConfigs.yaml new file mode 100755 index 000000000..bff930981 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/additionalAlertRelabelConfigs.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.prometheus.enabled .Values.prometheus.prometheusSpec.additionalAlertRelabelConfigs }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus-am-relabel-confg + namespace: {{ template "kube-prometheus-stack.namespace" . }} +{{- if .Values.prometheus.prometheusSpec.additionalPrometheusSecretsAnnotations }} + annotations: +{{ toYaml .Values.prometheus.prometheusSpec.additionalPrometheusSecretsAnnotations | indent 4 }} +{{- end }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-prometheus-am-relabel-confg +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +data: + additional-alert-relabel-configs.yaml: {{ toYaml .Values.prometheus.prometheusSpec.additionalAlertRelabelConfigs | b64enc | quote }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/additionalAlertmanagerConfigs.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/additionalAlertmanagerConfigs.yaml new file mode 100755 index 000000000..8aebc96c3 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/additionalAlertmanagerConfigs.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.prometheus.enabled .Values.prometheus.prometheusSpec.additionalAlertManagerConfigs }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus-am-confg + namespace: {{ template "kube-prometheus-stack.namespace" . 
}} +{{- if .Values.prometheus.prometheusSpec.additionalPrometheusSecretsAnnotations }} + annotations: +{{ toYaml .Values.prometheus.prometheusSpec.additionalPrometheusSecretsAnnotations | indent 4 }} +{{- end }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-prometheus-am-confg +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +data: + additional-alertmanager-configs.yaml: {{ toYaml .Values.prometheus.prometheusSpec.additionalAlertManagerConfigs | b64enc | quote }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/additionalPrometheusRules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/additionalPrometheusRules.yaml new file mode 100755 index 000000000..794e9ad27 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/additionalPrometheusRules.yaml @@ -0,0 +1,40 @@ +{{- if or .Values.additionalPrometheusRules .Values.additionalPrometheusRulesMap}} +apiVersion: v1 +kind: List +items: +{{- if .Values.additionalPrometheusRulesMap }} +{{- range $prometheusRuleName, $prometheusRule := .Values.additionalPrometheusRulesMap }} + - apiVersion: monitoring.coreos.com/v1 + kind: PrometheusRule + metadata: + name: {{ template "kube-prometheus-stack.name" $ }}-{{ $prometheusRuleName }} + namespace: {{ template "kube-prometheus-stack.namespace" $ }} + labels: + app: {{ template "kube-prometheus-stack.name" $ }} +{{ include "kube-prometheus-stack.labels" $ | indent 8 }} + {{- if $prometheusRule.additionalLabels }} +{{ toYaml $prometheusRule.additionalLabels | indent 8 }} + {{- end }} + spec: + groups: +{{ toYaml $prometheusRule.groups| indent 8 }} +{{- end }} +{{- else }} +{{- range .Values.additionalPrometheusRules }} + - apiVersion: monitoring.coreos.com/v1 + kind: PrometheusRule + metadata: + name: {{ template "kube-prometheus-stack.name" $ }}-{{ .name }} + namespace: {{ template "kube-prometheus-stack.namespace" $ }} + labels: + 
app: {{ template "kube-prometheus-stack.name" $ }} +{{ include "kube-prometheus-stack.labels" $ | indent 8 }} + {{- if .additionalLabels }} +{{ toYaml .additionalLabels | indent 8 }} + {{- end }} + spec: + groups: +{{ toYaml .groups| indent 8 }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/additionalScrapeConfigs.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/additionalScrapeConfigs.yaml new file mode 100755 index 000000000..21d9429d8 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/additionalScrapeConfigs.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.prometheus.enabled .Values.prometheus.prometheusSpec.additionalScrapeConfigs }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus-scrape-confg + namespace: {{ template "kube-prometheus-stack.namespace" . }} +{{- if .Values.prometheus.prometheusSpec.additionalPrometheusSecretsAnnotations }} + annotations: +{{ toYaml .Values.prometheus.prometheusSpec.additionalPrometheusSecretsAnnotations | indent 4 }} +{{- end }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-prometheus-scrape-confg +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +data: + additional-scrape-configs.yaml: {{ tpl (toYaml .Values.prometheus.prometheusSpec.additionalScrapeConfigs) $ | b64enc | quote }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/clusterrole.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/clusterrole.yaml new file mode 100755 index 000000000..3585b5db1 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/clusterrole.yaml @@ -0,0 +1,30 @@ +{{- if and .Values.prometheus.enabled .Values.global.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus + labels: + app: {{ template "kube-prometheus-stack.name" . }}-prometheus +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +rules: +# This permission are not in the kube-prometheus repo +# they're grabbed from https://github.com/prometheus/prometheus/blob/master/documentation/examples/rbac-setup.yml +- apiGroups: [""] + resources: + - nodes + - nodes/metrics + - services + - endpoints + - pods + verbs: ["get", "list", "watch"] +- apiGroups: + - "networking.k8s.io" + resources: + - ingresses + verbs: ["get", "list", "watch"] +- nonResourceURLs: ["/metrics", "/metrics/cadvisor"] + verbs: ["get"] +{{- if .Values.prometheus.additionalRulesForClusterRole }} +{{ toYaml .Values.prometheus.additionalRulesForClusterRole | indent 0 }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/clusterrolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/clusterrolebinding.yaml new file mode 100755 index 000000000..9fc4f65da --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/clusterrolebinding.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.prometheus.enabled .Values.global.rbac.create }} +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus + labels: + app: {{ template "kube-prometheus-stack.name" . }}-prometheus +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus +subjects: + - kind: ServiceAccount + name: {{ template "kube-prometheus-stack.prometheus.serviceAccountName" . }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} +{{- end }} + diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/ingress.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/ingress.yaml new file mode 100755 index 000000000..4d45873a7 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/ingress.yaml @@ -0,0 +1,65 @@ +{{- if and .Values.prometheus.enabled .Values.prometheus.ingress.enabled }} +{{- $pathType := .Values.prometheus.ingress.pathType | default "" }} +{{- $serviceName := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "prometheus" }} +{{- $servicePort := .Values.prometheus.service.port -}} +{{- $routePrefix := list .Values.prometheus.prometheusSpec.routePrefix }} +{{- $paths := .Values.prometheus.ingress.paths | default $routePrefix -}} +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} +apiVersion: networking.k8s.io/v1beta1 +{{ else }} +apiVersion: extensions/v1beta1 +{{ end -}} +kind: Ingress +metadata: +{{- if .Values.prometheus.ingress.annotations }} + annotations: +{{ toYaml .Values.prometheus.ingress.annotations | indent 4 }} +{{- end }} + name: {{ $serviceName }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-prometheus +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.prometheus.ingress.labels }} +{{ toYaml .Values.prometheus.ingress.labels | indent 4 }} +{{- end }} +spec: + {{- if or (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1") }} + {{- if .Values.prometheus.ingress.ingressClassName }} + ingressClassName: {{ .Values.prometheus.ingress.ingressClassName }} + {{- end }} + {{- end }} + rules: + {{- if .Values.prometheus.ingress.hosts }} + {{- range $host := .Values.prometheus.ingress.hosts }} + - host: {{ tpl $host $ }} + http: + paths: + {{- range $p := $paths }} + - path: {{ tpl $p $ }} + {{- if $pathType }} + pathType: {{ $pathType }} + {{- end }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} + {{- end -}} + {{- else }} + - http: + paths: + {{- range $p := $paths }} + - path: {{ tpl $p $ }} + {{- if $pathType }} + pathType: {{ $pathType }} + {{- end }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end -}} + {{- end -}} + {{- if .Values.prometheus.ingress.tls }} + tls: +{{ tpl (toYaml .Values.prometheus.ingress.tls | indent 4) . }} + {{- end -}} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/ingressThanosSidecar.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/ingressThanosSidecar.yaml new file mode 100755 index 000000000..69de0f663 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/ingressThanosSidecar.yaml @@ -0,0 +1,64 @@ +{{- if and .Values.prometheus.enabled .Values.prometheus.thanosIngress.enabled }} +{{- $pathType := .Values.prometheus.thanosIngress.pathType | default "" }} +{{- $serviceName := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) 
"prometheus" }} +{{- $thanosPort := .Values.prometheus.thanosIngress.servicePort -}} +{{- $routePrefix := list .Values.prometheus.prometheusSpec.routePrefix }} +{{- $paths := .Values.prometheus.thanosIngress.paths | default $routePrefix -}} +{{- if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} +apiVersion: networking.k8s.io/v1beta1 +{{ else }} +apiVersion: extensions/v1beta1 +{{ end -}} +kind: Ingress +metadata: +{{- if .Values.prometheus.thanosIngress.annotations }} + annotations: +{{ toYaml .Values.prometheus.thanosIngress.annotations | indent 4 }} +{{- end }} + name: {{ template "kube-prometheus-stack.fullname" . }}-thanos-gateway + labels: + app: {{ template "kube-prometheus-stack.name" . }}-prometheus +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.prometheus.thanosIngress.labels }} +{{ toYaml .Values.prometheus.thanosIngress.labels | indent 4 }} +{{- end }} +spec: + {{- if or (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1") }} + {{- if .Values.prometheus.thanosIngress.ingressClassName }} + ingressClassName: {{ .Values.prometheus.thanosIngress.ingressClassName }} + {{- end }} + {{- end }} + rules: + {{- if .Values.prometheus.thanosIngress.hosts }} + {{- range $host := .Values.prometheus.thanosIngress.hosts }} + - host: {{ tpl $host $ }} + http: + paths: + {{- range $p := $paths }} + - path: {{ tpl $p $ }} + {{- if $pathType }} + pathType: {{ $pathType }} + {{- end }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $thanosPort }} + {{- end -}} + {{- end -}} + {{- else }} + - http: + paths: + {{- range $p := $paths }} + - path: {{ tpl $p $ }} + {{- if $pathType }} + pathType: {{ $pathType }} + {{- end }} + backend: + serviceName: {{ $serviceName }} + servicePort: {{ $thanosPort }} + {{- end -}} + {{- end -}} + {{- if .Values.prometheus.thanosIngress.tls }} + tls: +{{ toYaml .Values.prometheus.thanosIngress.tls | indent 4 }} + {{- end -}} 
+{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/ingressperreplica.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/ingressperreplica.yaml new file mode 100755 index 000000000..33143775b --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/ingressperreplica.yaml @@ -0,0 +1,62 @@ +{{- if and .Values.prometheus.enabled .Values.prometheus.servicePerReplica.enabled .Values.prometheus.ingressPerReplica.enabled }} +{{- $pathType := .Values.prometheus.ingressPerReplica.pathType | default "" }} +{{- $count := .Values.prometheus.prometheusSpec.replicas | int -}} +{{- $servicePort := .Values.prometheus.servicePerReplica.port -}} +{{- $ingressValues := .Values.prometheus.ingressPerReplica -}} +apiVersion: v1 +kind: List +metadata: + name: {{ include "kube-prometheus-stack.fullname" $ }}-prometheus-ingressperreplica + namespace: {{ template "kube-prometheus-stack.namespace" $ }} +items: +{{ range $i, $e := until $count }} + - kind: Ingress + {{- if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} + apiVersion: networking.k8s.io/v1beta1 + {{ else }} + apiVersion: extensions/v1beta1 + {{ end -}} + metadata: + name: {{ include "kube-prometheus-stack.fullname" $ }}-prometheus-{{ $i }} + namespace: {{ template "kube-prometheus-stack.namespace" $ }} + labels: + app: {{ include "kube-prometheus-stack.name" $ }}-prometheus +{{ include "kube-prometheus-stack.labels" $ | indent 8 }} + {{- if $ingressValues.labels }} +{{ toYaml $ingressValues.labels | indent 8 }} + {{- end }} + {{- if $ingressValues.annotations }} + annotations: +{{ toYaml $ingressValues.annotations | indent 8 }} + {{- end }} + spec: + {{- if or ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") ($.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1") }} + {{- if $ingressValues.ingressClassName }} + ingressClassName: {{ $ingressValues.ingressClassName }} + {{- end }} + {{- 
end }} + rules: + - host: {{ $ingressValues.hostPrefix }}-{{ $i }}.{{ $ingressValues.hostDomain }} + http: + paths: + {{- range $p := $ingressValues.paths }} + - path: {{ tpl $p $ }} + {{- if $pathType }} + pathType: {{ $pathType }} + {{- end }} + backend: + serviceName: {{ include "kube-prometheus-stack.fullname" $ }}-prometheus-{{ $i }} + servicePort: {{ $servicePort }} + {{- end -}} + {{- if or $ingressValues.tlsSecretName $ingressValues.tlsSecretPerReplica.enabled }} + tls: + - hosts: + - {{ $ingressValues.hostPrefix }}-{{ $i }}.{{ $ingressValues.hostDomain }} + {{- if $ingressValues.tlsSecretPerReplica.enabled }} + secretName: {{ $ingressValues.tlsSecretPerReplica.prefix }}-{{ $i }} + {{- else }} + secretName: {{ $ingressValues.tlsSecretName }} + {{- end }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/nginx-config.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/nginx-config.yaml new file mode 100755 index 000000000..3f346ca4c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/nginx-config.yaml @@ -0,0 +1,66 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: prometheus-nginx-proxy-config + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-prometheus +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.prometheus.annotations }} + annotations: +{{ toYaml .Values.prometheus.annotations | indent 4 }} +{{- end }} +data: + nginx.conf: |- + worker_processes auto; + error_log /dev/stdout warn; + pid /var/cache/nginx/nginx.pid; + + events { + worker_connections 1024; + } + + http { + include /etc/nginx/mime.types; + log_format main '[$time_local - $status] $remote_addr - $remote_user $request ($http_referer)'; + + proxy_connect_timeout 10; + proxy_read_timeout 180; + proxy_send_timeout 5; + proxy_buffering off; + proxy_cache_path /var/cache/nginx/cache levels=1:2 keys_zone=my_zone:100m inactive=1d max_size=10g; + + server { + listen 8081; + access_log off; + + gzip on; + gzip_min_length 1k; + gzip_comp_level 2; + gzip_types text/plain application/javascript application/x-javascript text/css application/xml text/javascript image/jpeg image/gif image/png; + gzip_vary on; + gzip_disable "MSIE [1-6]\."; + + proxy_set_header Host $host; + + location / { + proxy_cache my_zone; + proxy_cache_valid 200 302 1d; + proxy_cache_valid 301 30d; + proxy_cache_valid any 5m; + proxy_cache_bypass $http_cache_control; + add_header X-Proxy-Cache $upstream_cache_status; + add_header Cache-Control "public"; + + proxy_pass http://localhost:9090/; + + sub_filter_types text/html; + sub_filter_once off; + sub_filter 'var PATH_PREFIX = "";' 'var PATH_PREFIX = ".";'; + + if ($request_filename ~ .*\.(?:js|css|jpg|jpeg|gif|png|ico|cur|gz|svg|svgz|mp4|ogg|ogv|webm)$) { + expires 90d; + } + } + } + } diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/podDisruptionBudget.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/podDisruptionBudget.yaml new file mode 100755 index 000000000..573317a32 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/podDisruptionBudget.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.prometheus.enabled 
.Values.prometheus.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-prometheus +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + {{- if .Values.prometheus.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.prometheus.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.prometheus.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.prometheus.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + app: prometheus + prometheus: {{ template "kube-prometheus-stack.fullname" . }}-prometheus +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/podmonitors.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/podmonitors.yaml new file mode 100755 index 000000000..95d568e13 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/podmonitors.yaml @@ -0,0 +1,37 @@ +{{- if and .Values.prometheus.enabled .Values.prometheus.additionalPodMonitors }} +apiVersion: v1 +kind: List +items: +{{- range .Values.prometheus.additionalPodMonitors }} + - apiVersion: monitoring.coreos.com/v1 + kind: PodMonitor + metadata: + name: {{ .name }} + namespace: {{ template "kube-prometheus-stack.namespace" $ }} + labels: + app: {{ template "kube-prometheus-stack.name" $ }}-prometheus +{{ include "kube-prometheus-stack.labels" $ | indent 8 }} + {{- if .additionalLabels }} +{{ toYaml .additionalLabels | indent 8 }} + {{- end }} + spec: + podMetricsEndpoints: +{{ toYaml .podMetricsEndpoints | indent 8 }} + {{- if .jobLabel }} + jobLabel: {{ .jobLabel }} + {{- end }} + {{- if .namespaceSelector }} + namespaceSelector: +{{ toYaml .namespaceSelector | indent 8 }} + {{- end 
}} + selector: +{{ toYaml .selector | indent 8 }} + {{- if .podTargetLabels }} + podTargetLabels: +{{ toYaml .podTargetLabels | indent 8 }} + {{- end }} + {{- if .sampleLimit }} + sampleLimit: {{ .sampleLimit }} + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/prometheus.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/prometheus.yaml new file mode 100755 index 000000000..9c30c814c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/prometheus.yaml @@ -0,0 +1,319 @@ +{{- if .Values.prometheus.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: Prometheus +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-prometheus +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.prometheus.annotations }} + annotations: +{{ toYaml .Values.prometheus.annotations | indent 4 }} +{{- end }} +spec: + alerting: + alertmanagers: +{{- if .Values.prometheus.prometheusSpec.alertingEndpoints }} +{{ toYaml .Values.prometheus.prometheusSpec.alertingEndpoints | indent 6 }} +{{- else if .Values.alertmanager.enabled }} + - namespace: {{ template "kube-prometheus-stack.namespace" . }} + name: {{ template "kube-prometheus-stack.fullname" . 
}}-alertmanager + port: {{ .Values.alertmanager.alertmanagerSpec.portName }} + {{- if .Values.alertmanager.alertmanagerSpec.routePrefix }} + pathPrefix: "{{ .Values.alertmanager.alertmanagerSpec.routePrefix }}" + {{- end }} + apiVersion: {{ .Values.alertmanager.apiVersion }} +{{- else }} + [] +{{- end }} +{{- if .Values.prometheus.prometheusSpec.apiserverConfig }} + apiserverConfig: +{{ toYaml .Values.prometheus.prometheusSpec.apiserverConfig | indent 4}} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.image }} + image: {{ template "system_default_registry" . }}{{ .Values.prometheus.prometheusSpec.image.repository }}:{{ .Values.prometheus.prometheusSpec.image.tag }} + version: {{ .Values.prometheus.prometheusSpec.image.tag }} + {{- if .Values.prometheus.prometheusSpec.image.sha }} + sha: {{ .Values.prometheus.prometheusSpec.image.sha }} + {{- end }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.externalLabels }} + externalLabels: +{{ tpl (toYaml .Values.prometheus.prometheusSpec.externalLabels | indent 4) . }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.prometheusExternalLabelNameClear }} + prometheusExternalLabelName: "" +{{- else if .Values.prometheus.prometheusSpec.prometheusExternalLabelName }} + prometheusExternalLabelName: "{{ .Values.prometheus.prometheusSpec.prometheusExternalLabelName }}" +{{- end }} +{{- if .Values.prometheus.prometheusSpec.replicaExternalLabelNameClear }} + replicaExternalLabelName: "" +{{- else if .Values.prometheus.prometheusSpec.replicaExternalLabelName }} + replicaExternalLabelName: "{{ .Values.prometheus.prometheusSpec.replicaExternalLabelName }}" +{{- end }} +{{- if .Values.prometheus.prometheusSpec.externalUrl }} + externalUrl: "{{ tpl .Values.prometheus.prometheusSpec.externalUrl . }}" +{{- else if and .Values.prometheus.ingress.enabled .Values.prometheus.ingress.hosts }} + externalUrl: "http://{{ tpl (index .Values.prometheus.ingress.hosts 0) . 
}}{{ .Values.prometheus.prometheusSpec.routePrefix }}" +{{- else if not (or (kindIs "invalid" .Values.global.cattle.url) (kindIs "invalid" .Values.global.cattle.clusterId)) }} + externalUrl: "{{ .Values.global.cattle.url }}/k8s/clusters/{{ .Values.global.cattle.clusterId }}/api/v1/namespaces/{{ .Values.namespaceOverride }}/services/http:{{ template "kube-prometheus-stack.fullname" . }}-prometheus:{{ .Values.prometheus.service.port }}/proxy" +{{- else }} + externalUrl: http://{{ template "kube-prometheus-stack.fullname" . }}-prometheus.{{ template "kube-prometheus-stack.namespace" . }}:{{ .Values.prometheus.service.port }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }} + ignoreNamespaceSelectors: {{ .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }} +{{- end }} + nodeSelector: {{ include "linux-node-selector" . | nindent 4 }} +{{- if .Values.prometheus.prometheusSpec.nodeSelector }} +{{ toYaml .Values.prometheus.prometheusSpec.nodeSelector | indent 4 }} +{{- end }} + paused: {{ .Values.prometheus.prometheusSpec.paused }} + replicas: {{ .Values.prometheus.prometheusSpec.replicas }} + shards: {{ .Values.prometheus.prometheusSpec.shards }} + logLevel: {{ .Values.prometheus.prometheusSpec.logLevel }} + logFormat: {{ .Values.prometheus.prometheusSpec.logFormat }} + listenLocal: {{ .Values.prometheus.prometheusSpec.listenLocal }} + enableAdminAPI: {{ .Values.prometheus.prometheusSpec.enableAdminAPI }} +{{- if .Values.prometheus.prometheusSpec.scrapeInterval }} + scrapeInterval: {{ .Values.prometheus.prometheusSpec.scrapeInterval }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.scrapeTimeout }} + scrapeTimeout: {{ .Values.prometheus.prometheusSpec.scrapeTimeout }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.evaluationInterval }} + evaluationInterval: {{ .Values.prometheus.prometheusSpec.evaluationInterval }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.resources }} + resources: +{{ toYaml 
.Values.prometheus.prometheusSpec.resources | indent 4 }} +{{- end }} + retention: {{ .Values.prometheus.prometheusSpec.retention | quote }} +{{- if .Values.prometheus.prometheusSpec.retentionSize }} + retentionSize: {{ .Values.prometheus.prometheusSpec.retentionSize | quote }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.walCompression }} + walCompression: {{ .Values.prometheus.prometheusSpec.walCompression }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.routePrefix }} + routePrefix: {{ .Values.prometheus.prometheusSpec.routePrefix | quote }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.secrets }} + secrets: +{{ toYaml .Values.prometheus.prometheusSpec.secrets | indent 4 }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.configMaps }} + configMaps: +{{ toYaml .Values.prometheus.prometheusSpec.configMaps | indent 4 }} +{{- end }} + serviceAccountName: {{ template "kube-prometheus-stack.prometheus.serviceAccountName" . }} +{{- if .Values.prometheus.prometheusSpec.serviceMonitorSelector }} + serviceMonitorSelector: +{{ toYaml .Values.prometheus.prometheusSpec.serviceMonitorSelector | indent 4 }} +{{ else if .Values.prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues }} + serviceMonitorSelector: + matchLabels: + release: {{ $.Release.Name | quote }} +{{ else }} + serviceMonitorSelector: {} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.serviceMonitorNamespaceSelector }} + serviceMonitorNamespaceSelector: +{{ toYaml .Values.prometheus.prometheusSpec.serviceMonitorNamespaceSelector | indent 4 }} +{{ else }} + serviceMonitorNamespaceSelector: {} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.podMonitorSelector }} + podMonitorSelector: +{{ toYaml .Values.prometheus.prometheusSpec.podMonitorSelector | indent 4 }} +{{ else if .Values.prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues }} + podMonitorSelector: + matchLabels: + release: {{ $.Release.Name | quote }} +{{ else }} + podMonitorSelector: {} +{{- 
end }} +{{- if .Values.prometheus.prometheusSpec.podMonitorNamespaceSelector }} + podMonitorNamespaceSelector: +{{ toYaml .Values.prometheus.prometheusSpec.podMonitorNamespaceSelector | indent 4 }} +{{ else }} + podMonitorNamespaceSelector: {} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.probeSelector }} + probeSelector: +{{ toYaml .Values.prometheus.prometheusSpec.probeSelector | indent 4 }} +{{ else if .Values.prometheus.prometheusSpec.probeSelectorNilUsesHelmValues }} + probeSelector: + matchLabels: + release: {{ $.Release.Name | quote }} +{{ else }} + probeSelector: {} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.probeNamespaceSelector }} + probeNamespaceSelector: +{{ toYaml .Values.prometheus.prometheusSpec.probeNamespaceSelector | indent 4 }} +{{ else }} + probeNamespaceSelector: {} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.remoteRead }} + remoteRead: +{{ toYaml .Values.prometheus.prometheusSpec.remoteRead | indent 4 }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.remoteWrite }} + remoteWrite: +{{ toYaml .Values.prometheus.prometheusSpec.remoteWrite | indent 4 }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.securityContext }} + securityContext: +{{ toYaml .Values.prometheus.prometheusSpec.securityContext | indent 4 }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.ruleNamespaceSelector }} + ruleNamespaceSelector: +{{ toYaml .Values.prometheus.prometheusSpec.ruleNamespaceSelector | indent 4 }} +{{ else }} + ruleNamespaceSelector: {} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.ruleSelector }} + ruleSelector: +{{ toYaml .Values.prometheus.prometheusSpec.ruleSelector | indent 4}} +{{- else if .Values.prometheus.prometheusSpec.ruleSelectorNilUsesHelmValues }} + ruleSelector: + matchLabels: + app: {{ template "kube-prometheus-stack.name" . 
}} + release: {{ $.Release.Name | quote }} +{{ else }} + ruleSelector: {} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.storageSpec }} + storage: +{{ toYaml .Values.prometheus.prometheusSpec.storageSpec | indent 4 }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.podMetadata }} + podMetadata: +{{ tpl (toYaml .Values.prometheus.prometheusSpec.podMetadata | indent 4) . }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.query }} + query: +{{ toYaml .Values.prometheus.prometheusSpec.query | indent 4}} +{{- end }} +{{- if or .Values.prometheus.prometheusSpec.podAntiAffinity .Values.prometheus.prometheusSpec.affinity }} + affinity: +{{- if .Values.prometheus.prometheusSpec.affinity }} +{{ toYaml .Values.prometheus.prometheusSpec.affinity | indent 4 }} +{{- end }} +{{- if eq .Values.prometheus.prometheusSpec.podAntiAffinity "hard" }} + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - topologyKey: {{ .Values.prometheus.prometheusSpec.podAntiAffinityTopologyKey }} + labelSelector: + matchExpressions: + - {key: app, operator: In, values: [prometheus]} + - {key: prometheus, operator: In, values: [{{ template "kube-prometheus-stack.fullname" . }}-prometheus]} +{{- else if eq .Values.prometheus.prometheusSpec.podAntiAffinity "soft" }} + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + topologyKey: {{ .Values.prometheus.prometheusSpec.podAntiAffinityTopologyKey }} + labelSelector: + matchExpressions: + - {key: app, operator: In, values: [prometheus]} + - {key: prometheus, operator: In, values: [{{ template "kube-prometheus-stack.fullname" . }}-prometheus]} +{{- end }} +{{- end }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 4 }} +{{- if .Values.prometheus.prometheusSpec.tolerations }} +{{ toYaml .Values.prometheus.prometheusSpec.tolerations | indent 4 }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.topologySpreadConstraints }} + topologySpreadConstraints: +{{ toYaml .Values.prometheus.prometheusSpec.topologySpreadConstraints | indent 4 }} +{{- end }} +{{- if .Values.global.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.global.imagePullSecrets | indent 4 }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.additionalScrapeConfigs }} + additionalScrapeConfigs: + name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus-scrape-confg + key: additional-scrape-configs.yaml +{{- end }} +{{- if .Values.prometheus.prometheusSpec.additionalScrapeConfigsSecret.enabled }} + additionalScrapeConfigs: + name: {{ .Values.prometheus.prometheusSpec.additionalScrapeConfigsSecret.name }} + key: {{ .Values.prometheus.prometheusSpec.additionalScrapeConfigsSecret.key }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.additionalAlertManagerConfigs }} + additionalAlertManagerConfigs: + name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus-am-confg + key: additional-alertmanager-configs.yaml +{{- end }} +{{- if .Values.prometheus.prometheusSpec.additionalAlertRelabelConfigs }} + additionalAlertRelabelConfigs: + name: {{ template "kube-prometheus-stack.fullname" . 
}}-prometheus-am-relabel-confg + key: additional-alert-relabel-configs.yaml +{{- end }} +{{- if .Values.prometheus.prometheusSpec.containers }} + containers: +{{ tpl .Values.prometheus.prometheusSpec.containers $ | indent 4 }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.initContainers }} + initContainers: +{{ toYaml .Values.prometheus.prometheusSpec.initContainers | indent 4 }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.priorityClassName }} + priorityClassName: {{ .Values.prometheus.prometheusSpec.priorityClassName }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.thanos }} + thanos: +{{ toYaml .Values.prometheus.prometheusSpec.thanos | indent 4 }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.disableCompaction }} + disableCompaction: {{ .Values.prometheus.prometheusSpec.disableCompaction }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.portName }} + portName: {{ .Values.prometheus.prometheusSpec.portName }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.volumes }} + volumes: +{{ toYaml .Values.prometheus.prometheusSpec.volumes | indent 4 }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.volumeMounts }} + volumeMounts: +{{ toYaml .Values.prometheus.prometheusSpec.volumeMounts | indent 4 }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.arbitraryFSAccessThroughSMs }} + arbitraryFSAccessThroughSMs: +{{ toYaml .Values.prometheus.prometheusSpec.arbitraryFSAccessThroughSMs | indent 4 }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.overrideHonorLabels }} + overrideHonorLabels: {{ .Values.prometheus.prometheusSpec.overrideHonorLabels }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.overrideHonorTimestamps }} + overrideHonorTimestamps: {{ .Values.prometheus.prometheusSpec.overrideHonorTimestamps }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }} + ignoreNamespaceSelectors: {{ .Values.prometheus.prometheusSpec.ignoreNamespaceSelectors }} +{{- end }} +{{- if 
.Values.prometheus.prometheusSpec.enforcedNamespaceLabel }} + enforcedNamespaceLabel: {{ .Values.prometheus.prometheusSpec.enforcedNamespaceLabel }} +{{- $prometheusDefaultRulesExcludedFromEnforce := (include "rules.names" .) | fromYaml }} + prometheusRulesExcludedFromEnforce: +{{- range $prometheusDefaultRulesExcludedFromEnforce.rules }} + - ruleNamespace: "{{ template "kube-prometheus-stack.namespace" $ }}" + ruleName: "{{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) . | trunc 63 | trimSuffix "-" }}" +{{- end }} +{{- if .Values.prometheus.prometheusSpec.prometheusRulesExcludedFromEnforce }} +{{ toYaml .Values.prometheus.prometheusSpec.prometheusRulesExcludedFromEnforce | indent 4 }} +{{- end }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.queryLogFile }} + queryLogFile: {{ .Values.prometheus.prometheusSpec.queryLogFile }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.enforcedSampleLimit }} + enforcedSampleLimit: {{ .Values.prometheus.prometheusSpec.enforcedSampleLimit }} +{{- end }} +{{- if .Values.prometheus.prometheusSpec.allowOverlappingBlocks }} + allowOverlappingBlocks: {{ .Values.prometheus.prometheusSpec.allowOverlappingBlocks }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/psp-clusterrole.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/psp-clusterrole.yaml new file mode 100755 index 000000000..a279fb241 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/psp-clusterrole.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus-psp + labels: + app: {{ template "kube-prometheus-stack.name" . }}-prometheus +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +rules: +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if semverCompare "> 1.15.0-0" $kubeTargetVersion }} +- apiGroups: ['policy'] +{{- else }} +- apiGroups: ['extensions'] +{{- end }} + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "kube-prometheus-stack.fullname" . }}-prometheus +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/psp-clusterrolebinding.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/psp-clusterrolebinding.yaml new file mode 100755 index 000000000..27b73b74b --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/psp-clusterrolebinding.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus-psp + labels: + app: {{ template "kube-prometheus-stack.name" . }}-prometheus +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus-psp +subjects: + - kind: ServiceAccount + name: {{ template "kube-prometheus-stack.prometheus.serviceAccountName" . }} + namespace: {{ template "kube-prometheus-stack.namespace" . 
}} +{{- end }} + diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/psp.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/psp.yaml new file mode 100755 index 000000000..08da5e124 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/psp.yaml @@ -0,0 +1,62 @@ +{{- if and .Values.prometheus.enabled .Values.global.rbac.create .Values.global.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus + labels: + app: {{ template "kube-prometheus-stack.name" . }}-prometheus +{{- if .Values.global.rbac.pspAnnotations }} + annotations: +{{ toYaml .Values.global.rbac.pspAnnotations | indent 4 }} +{{- end }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + privileged: false + # Required to prevent escalations to root. + # allowPrivilegeEscalation: false + # This is redundant with non-root + disallow privilege escalation, + # but we can provide it for defense in depth. + #requiredDropCapabilities: + # - ALL + # Allow core volume types. + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' +{{- if .Values.prometheus.podSecurityPolicy.volumes }} +{{ toYaml .Values.prometheus.podSecurityPolicy.volumes | indent 4 }} +{{- end }} + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + # Permits the container to run with root privileges as well. + rule: 'RunAsAny' + seLinux: + # This policy assumes the nodes are using AppArmor rather than SELinux. + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 0 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 0 + max: 65535 + readOnlyRootFilesystem: false +{{- if .Values.prometheus.podSecurityPolicy.allowedCapabilities }} + allowedCapabilities: +{{ toYaml .Values.prometheus.podSecurityPolicy.allowedCapabilities | indent 4 }} +{{- end }} +{{- if .Values.prometheus.podSecurityPolicy.allowedHostPaths }} + allowedHostPaths: +{{ toYaml .Values.prometheus.podSecurityPolicy.allowedHostPaths | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/alertmanager.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/alertmanager.rules.yaml new file mode 100755 index 000000000..387a67715 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/alertmanager.rules.yaml @@ -0,0 +1,70 @@ +{{- /* +Generated from 'alertmanager.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/prometheus-rules.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.alertmanager }} +{{- $alertmanagerJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "alertmanager" }} +{{- $namespace := printf "%s" (include "kube-prometheus-stack.namespace" .) }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "alertmanager.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . 
}} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: alertmanager.rules + rules: + - alert: AlertmanagerConfigInconsistent + annotations: + message: 'The configuration of the instances of the Alertmanager cluster `{{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.service {{`}}`}}` are out of sync. + + {{`{{`}} range printf "alertmanager_config_hash{namespace=\"%s\",service=\"%s\"}" $labels.namespace $labels.service | query {{`}}`}} + + Configuration hash for pod {{`{{`}} .Labels.pod {{`}}`}} is "{{`{{`}} printf "%.f" .Value {{`}}`}}" + + {{`{{`}} end {{`}}`}} + + ' + expr: count by(namespace,service) (count_values by(namespace,service) ("config_hash", alertmanager_config_hash{job="{{ $alertmanagerJob }}",namespace="{{ $namespace }}"})) != 1 + for: 5m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: AlertmanagerFailedReload + annotations: + message: Reloading Alertmanager's configuration has failed for {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod{{`}}`}}. + expr: alertmanager_config_last_reload_successful{job="{{ $alertmanagerJob }}",namespace="{{ $namespace }}"} == 0 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: AlertmanagerMembersInconsistent + annotations: + message: Alertmanager has not found all other members of the cluster. 
+ expr: |- + alertmanager_cluster_members{job="{{ $alertmanagerJob }}",namespace="{{ $namespace }}"} + != on (service) GROUP_LEFT() + count by (service) (alertmanager_cluster_members{job="{{ $alertmanagerJob }}",namespace="{{ $namespace }}"}) + for: 5m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/etcd.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/etcd.yaml new file mode 100755 index 000000000..85287315c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/etcd.yaml @@ -0,0 +1,181 @@ +{{- /* +Generated from 'etcd' group from https://raw.githubusercontent.com/etcd-io/website/master/content/docs/v3.4.0/op-guide/etcd3_alert.rules.yml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.etcd }} +{{- if (include "exporter.kubeEtcd.enabled" .)}} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "etcd" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: etcd + rules: + - alert: etcdInsufficientMembers + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": insufficient members ({{`{{`}} $value {{`}}`}}).' + expr: sum(up{job=~".*etcd.*"} == bool 1) by (job) < ((count(up{job=~".*etcd.*"}) by (job) + 1) / 2) + for: 3m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdNoLeader + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": member {{`{{`}} $labels.instance {{`}}`}} has no leader.' + expr: etcd_server_has_leader{job=~".*etcd.*"} == 0 + for: 1m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdHighNumberOfLeaderChanges + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": instance {{`{{`}} $labels.instance {{`}}`}} has seen {{`{{`}} $value {{`}}`}} leader changes within the last hour.' + expr: rate(etcd_server_leader_changes_seen_total{job=~".*etcd.*"}[15m]) > 3 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdHighNumberOfFailedGRPCRequests + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": {{`{{`}} $value {{`}}`}}% of requests for {{`{{`}} $labels.grpc_method {{`}}`}} failed on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' 
+ expr: |- + 100 * sum(rate(grpc_server_handled_total{job=~".*etcd.*", grpc_code!="OK"}[5m])) BY (job, instance, grpc_service, grpc_method) + / + sum(rate(grpc_server_handled_total{job=~".*etcd.*"}[5m])) BY (job, instance, grpc_service, grpc_method) + > 1 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdHighNumberOfFailedGRPCRequests + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": {{`{{`}} $value {{`}}`}}% of requests for {{`{{`}} $labels.grpc_method {{`}}`}} failed on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' + expr: |- + 100 * sum(rate(grpc_server_handled_total{job=~".*etcd.*", grpc_code!="OK"}[5m])) BY (job, instance, grpc_service, grpc_method) + / + sum(rate(grpc_server_handled_total{job=~".*etcd.*"}[5m])) BY (job, instance, grpc_service, grpc_method) + > 5 + for: 5m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdGRPCRequestsSlow + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": gRPC requests to {{`{{`}} $labels.grpc_method {{`}}`}} are taking {{`{{`}} $value {{`}}`}}s on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' + expr: |- + histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job=~".*etcd.*", grpc_type="unary"}[5m])) by (job, instance, grpc_service, grpc_method, le)) + > 0.15 + for: 10m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdMemberCommunicationSlow + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": member communication with {{`{{`}} $labels.To {{`}}`}} is taking {{`{{`}} $value {{`}}`}}s on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' 
+ expr: |- + histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket{job=~".*etcd.*"}[5m])) + > 0.15 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdHighNumberOfFailedProposals + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": {{`{{`}} $value {{`}}`}} proposal failures within the last hour on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' + expr: rate(etcd_server_proposals_failed_total{job=~".*etcd.*"}[15m]) > 5 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdHighFsyncDurations + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": 99th percentile fync durations are {{`{{`}} $value {{`}}`}}s on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' + expr: |- + histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket{job=~".*etcd.*"}[5m])) + > 0.5 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdHighCommitDurations + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": 99th percentile commit durations {{`{{`}} $value {{`}}`}}s on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' 
+ expr: |- + histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket{job=~".*etcd.*"}[5m])) + > 0.25 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdHighNumberOfFailedHTTPRequests + annotations: + message: '{{`{{`}} $value {{`}}`}}% of requests for {{`{{`}} $labels.method {{`}}`}} failed on etcd instance {{`{{`}} $labels.instance {{`}}`}}' + expr: |- + sum(rate(etcd_http_failed_total{job=~".*etcd.*", code!="404"}[5m])) BY (method) / sum(rate(etcd_http_received_total{job=~".*etcd.*"}[5m])) + BY (method) > 0.01 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdHighNumberOfFailedHTTPRequests + annotations: + message: '{{`{{`}} $value {{`}}`}}% of requests for {{`{{`}} $labels.method {{`}}`}} failed on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' + expr: |- + sum(rate(etcd_http_failed_total{job=~".*etcd.*", code!="404"}[5m])) BY (method) / sum(rate(etcd_http_received_total{job=~".*etcd.*"}[5m])) + BY (method) > 0.05 + for: 10m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdHTTPRequestsSlow + annotations: + message: etcd instance {{`{{`}} $labels.instance {{`}}`}} HTTP requests to {{`{{`}} $labels.method {{`}}`}} are slow. 
+ expr: |- + histogram_quantile(0.99, rate(etcd_http_successful_duration_seconds_bucket[5m])) + > 0.15 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/general.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/general.rules.yaml new file mode 100755 index 000000000..80771f4f8 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/general.rules.yaml @@ -0,0 +1,56 @@ +{{- /* +Generated from 'general.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/prometheus-rules.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.general }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "general.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: general.rules + rules: + - alert: TargetDown + annotations: + message: '{{`{{`}} printf "%.4g" $value {{`}}`}}% of the {{`{{`}} $labels.job {{`}}`}}/{{`{{`}} $labels.service {{`}}`}} targets in {{`{{`}} $labels.namespace {{`}}`}} namespace are down.' + expr: 100 * (count(up == 0) BY (job, namespace, service) / count(up) BY (job, namespace, service)) > 10 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: Watchdog + annotations: + message: 'This is an alert meant to ensure that the entire alerting pipeline is functional. + + This alert is always firing, therefore it should always be firing in Alertmanager + + and always fire against a receiver. There are integrations with various notification + + mechanisms that send a notification when this alert is not firing. For example the + + "DeadMansSnitch" integration in PagerDuty. 
+ + ' + expr: vector(1) + labels: + severity: none +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/k8s.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/k8s.rules.yaml new file mode 100755 index 000000000..19511e8fb --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/k8s.rules.yaml @@ -0,0 +1,117 @@ +{{- /* +Generated from 'k8s.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/kubernetes-prometheusRule.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.k8s }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "k8s.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: k8s.rules + rules: + - expr: |- + sum by (cluster, namespace, pod, container) ( + rate(container_cpu_usage_seconds_total{job="kubelet", metrics_path="/metrics/cadvisor", image!="", container!="POD"}[5m]) + ) * on (cluster, namespace, pod) group_left(node) topk by (cluster, namespace, pod) ( + 1, max by(cluster, namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate + - expr: |- + container_memory_working_set_bytes{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, + max by(namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_memory_working_set_bytes + - expr: |- + container_memory_rss{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, + max by(namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_memory_rss + - expr: |- + container_memory_cache{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, + max by(namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_memory_cache + - expr: |- + container_memory_swap{job="kubelet", metrics_path="/metrics/cadvisor", image!=""} + * on (namespace, pod) group_left(node) topk by(namespace, pod) (1, + max by(namespace, pod, node) (kube_pod_info{node!=""}) + ) + record: node_namespace_pod_container:container_memory_swap + - expr: |- + sum by (namespace) ( + sum by (namespace, pod) ( + max by (namespace, pod, container) 
( + kube_pod_container_resource_requests_memory_bytes{job="kube-state-metrics"} + ) * on(namespace, pod) group_left() max by (namespace, pod) ( + kube_pod_status_phase{phase=~"Pending|Running"} == 1 + ) + ) + ) + record: namespace:kube_pod_container_resource_requests_memory_bytes:sum + - expr: |- + sum by (namespace) ( + sum by (namespace, pod) ( + max by (namespace, pod, container) ( + kube_pod_container_resource_requests_cpu_cores{job="kube-state-metrics"} + ) * on(namespace, pod) group_left() max by (namespace, pod) ( + kube_pod_status_phase{phase=~"Pending|Running"} == 1 + ) + ) + ) + record: namespace:kube_pod_container_resource_requests_cpu_cores:sum + - expr: |- + max by (cluster, namespace, workload, pod) ( + label_replace( + label_replace( + kube_pod_owner{job="kube-state-metrics", owner_kind="ReplicaSet"}, + "replicaset", "$1", "owner_name", "(.*)" + ) * on(replicaset, namespace) group_left(owner_name) topk by(replicaset, namespace) ( + 1, max by (replicaset, namespace, owner_name) ( + kube_replicaset_owner{job="kube-state-metrics"} + ) + ), + "workload", "$1", "owner_name", "(.*)" + ) + ) + labels: + workload_type: deployment + record: namespace_workload_pod:kube_pod_owner:relabel + - expr: |- + max by (cluster, namespace, workload, pod) ( + label_replace( + kube_pod_owner{job="kube-state-metrics", owner_kind="DaemonSet"}, + "workload", "$1", "owner_name", "(.*)" + ) + ) + labels: + workload_type: daemonset + record: namespace_workload_pod:kube_pod_owner:relabel + - expr: |- + max by (cluster, namespace, workload, pod) ( + label_replace( + kube_pod_owner{job="kube-state-metrics", owner_kind="StatefulSet"}, + "workload", "$1", "owner_name", "(.*)" + ) + ) + labels: + workload_type: statefulset + record: namespace_workload_pod:kube_pod_owner:relabel +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-apiserver-availability.rules.yaml 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-apiserver-availability.rules.yaml new file mode 100755 index 000000000..7b00b54a7 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-apiserver-availability.rules.yaml @@ -0,0 +1,160 @@ +{{- /* +Generated from 'kube-apiserver-availability.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/kubernetes-prometheusRule.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.kubeApiServer.enabled .Values.defaultRules.rules.kubeApiserverAvailability }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kube-apiserver-availability.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - interval: 3m + name: kube-apiserver-availability.rules + rules: + - expr: |- + 1 - ( + ( + # write too slow + sum(increase(apiserver_request_duration_seconds_count{verb=~"POST|PUT|PATCH|DELETE"}[30d])) + - + sum(increase(apiserver_request_duration_seconds_bucket{verb=~"POST|PUT|PATCH|DELETE",le="1"}[30d])) + ) + + ( + # read too slow + sum(increase(apiserver_request_duration_seconds_count{verb=~"LIST|GET"}[30d])) + - + ( + ( + sum(increase(apiserver_request_duration_seconds_bucket{verb=~"LIST|GET",scope=~"resource|",le="0.1"}[30d])) + or + vector(0) + ) + + + sum(increase(apiserver_request_duration_seconds_bucket{verb=~"LIST|GET",scope="namespace",le="0.5"}[30d])) + + + sum(increase(apiserver_request_duration_seconds_bucket{verb=~"LIST|GET",scope="cluster",le="5"}[30d])) + ) + ) + + # errors + sum(code:apiserver_request_total:increase30d{code=~"5.."} or vector(0)) + ) + / + sum(code:apiserver_request_total:increase30d) + labels: + verb: all + record: apiserver_request:availability30d + - expr: |- + 1 - ( + sum(increase(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[30d])) + - + ( + # too slow + ( + sum(increase(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="0.1"}[30d])) + or + vector(0) + ) + + + sum(increase(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="0.5"}[30d])) + + + sum(increase(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="5"}[30d])) + ) + + + # errors + sum(code:apiserver_request_total:increase30d{verb="read",code=~"5.."} or vector(0)) + ) + / + sum(code:apiserver_request_total:increase30d{verb="read"}) + labels: + 
verb: read + record: apiserver_request:availability30d + - expr: |- + 1 - ( + ( + # too slow + sum(increase(apiserver_request_duration_seconds_count{verb=~"POST|PUT|PATCH|DELETE"}[30d])) + - + sum(increase(apiserver_request_duration_seconds_bucket{verb=~"POST|PUT|PATCH|DELETE",le="1"}[30d])) + ) + + + # errors + sum(code:apiserver_request_total:increase30d{verb="write",code=~"5.."} or vector(0)) + ) + / + sum(code:apiserver_request_total:increase30d{verb="write"}) + labels: + verb: write + record: apiserver_request:availability30d + - expr: avg_over_time(code_verb:apiserver_request_total:increase1h[30d]) * 24 * 30 + record: code_verb:apiserver_request_total:increase30d + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="LIST",code=~"2.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="GET",code=~"2.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="POST",code=~"2.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="PUT",code=~"2.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="PATCH",code=~"2.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="DELETE",code=~"2.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="LIST",code=~"3.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="GET",code=~"3.."}[1h])) + record: 
code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="POST",code=~"3.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="PUT",code=~"3.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="PATCH",code=~"3.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="DELETE",code=~"3.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="LIST",code=~"4.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="GET",code=~"4.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="POST",code=~"4.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="PUT",code=~"4.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="PATCH",code=~"4.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="DELETE",code=~"4.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="LIST",code=~"5.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="GET",code=~"5.."}[1h])) + record: 
code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="POST",code=~"5.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="PUT",code=~"5.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="PATCH",code=~"5.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code, verb) (increase(apiserver_request_total{job="apiserver",verb="DELETE",code=~"5.."}[1h])) + record: code_verb:apiserver_request_total:increase1h + - expr: sum by (code) (code_verb:apiserver_request_total:increase30d{verb=~"LIST|GET"}) + labels: + verb: read + record: code:apiserver_request_total:increase30d + - expr: sum by (code) (code_verb:apiserver_request_total:increase30d{verb=~"POST|PUT|PATCH|DELETE"}) + labels: + verb: write + record: code:apiserver_request_total:increase30d +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-apiserver-slos.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-apiserver-slos.yaml new file mode 100755 index 000000000..0f44ccc10 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-apiserver-slos.yaml @@ -0,0 +1,95 @@ +{{- /* +Generated from 'kube-apiserver-slos' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/kubernetes-prometheusRule.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.kubeApiServer.enabled .Values.defaultRules.rules.kubeApiserverSlos }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kube-apiserver-slos" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kube-apiserver-slos + rules: + - alert: KubeAPIErrorBudgetBurn + annotations: + description: The API server is burning too much error budget. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapierrorbudgetburn + summary: The API server is burning too much error budget. + expr: |- + sum(apiserver_request:burnrate1h) > (14.40 * 0.01000) + and + sum(apiserver_request:burnrate5m) > (14.40 * 0.01000) + for: 2m + labels: + long: 1h + severity: critical + short: 5m +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeAPIErrorBudgetBurn + annotations: + description: The API server is burning too much error budget. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapierrorbudgetburn + summary: The API server is burning too much error budget. 
+ expr: |- + sum(apiserver_request:burnrate6h) > (6.00 * 0.01000) + and + sum(apiserver_request:burnrate30m) > (6.00 * 0.01000) + for: 15m + labels: + long: 6h + severity: critical + short: 30m +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeAPIErrorBudgetBurn + annotations: + description: The API server is burning too much error budget. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapierrorbudgetburn + summary: The API server is burning too much error budget. + expr: |- + sum(apiserver_request:burnrate1d) > (3.00 * 0.01000) + and + sum(apiserver_request:burnrate2h) > (3.00 * 0.01000) + for: 1h + labels: + long: 1d + severity: warning + short: 2h +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeAPIErrorBudgetBurn + annotations: + description: The API server is burning too much error budget. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapierrorbudgetburn + summary: The API server is burning too much error budget. 
+ expr: |- + sum(apiserver_request:burnrate3d) > (1.00 * 0.01000) + and + sum(apiserver_request:burnrate6h) > (1.00 * 0.01000) + for: 3h + labels: + long: 3d + severity: warning + short: 6h +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-apiserver.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-apiserver.rules.yaml new file mode 100755 index 000000000..eddc1e40f --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-apiserver.rules.yaml @@ -0,0 +1,358 @@ +{{- /* +Generated from 'kube-apiserver.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/kubernetes-prometheusRule.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.kubeApiServer.enabled .Values.defaultRules.rules.kubeApiserver }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kube-apiserver.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kube-apiserver.rules + rules: + - expr: |- + ( + ( + # too slow + sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[1d])) + - + ( + ( + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="0.1"}[1d])) + or + vector(0) + ) + + + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="0.5"}[1d])) + + + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="5"}[1d])) + ) + ) + + + # errors + sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[1d])) + ) + / + sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1d])) + labels: + verb: read + record: apiserver_request:burnrate1d + - expr: |- + ( + ( + # too slow + sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[1h])) + - + ( + ( + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="0.1"}[1h])) + or + vector(0) + ) + + + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="0.5"}[1h])) + + + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="5"}[1h])) + ) + ) + + + # errors + sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[1h])) + ) + / + sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[1h])) + labels: + verb: read + record: apiserver_request:burnrate1h + - expr: |- + ( + ( + # too slow + sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[2h])) + - + ( + ( 
+ sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="0.1"}[2h])) + or + vector(0) + ) + + + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="0.5"}[2h])) + + + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="5"}[2h])) + ) + ) + + + # errors + sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[2h])) + ) + / + sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[2h])) + labels: + verb: read + record: apiserver_request:burnrate2h + - expr: |- + ( + ( + # too slow + sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[30m])) + - + ( + ( + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="0.1"}[30m])) + or + vector(0) + ) + + + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="0.5"}[30m])) + + + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="5"}[30m])) + ) + ) + + + # errors + sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[30m])) + ) + / + sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[30m])) + labels: + verb: read + record: apiserver_request:burnrate30m + - expr: |- + ( + ( + # too slow + sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[3d])) + - + ( + ( + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="0.1"}[3d])) + or + vector(0) + ) + + + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="0.5"}[3d])) + + + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="5"}[3d])) + ) + ) + + + # errors + 
sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[3d])) + ) + / + sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[3d])) + labels: + verb: read + record: apiserver_request:burnrate3d + - expr: |- + ( + ( + # too slow + sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[5m])) + - + ( + ( + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="0.1"}[5m])) + or + vector(0) + ) + + + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="0.5"}[5m])) + + + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="5"}[5m])) + ) + ) + + + # errors + sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[5m])) + ) + / + sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m])) + labels: + verb: read + record: apiserver_request:burnrate5m + - expr: |- + ( + ( + # too slow + sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"LIST|GET"}[6h])) + - + ( + ( + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope=~"resource|",le="0.1"}[6h])) + or + vector(0) + ) + + + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="namespace",le="0.5"}[6h])) + + + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET",scope="cluster",le="5"}[6h])) + ) + ) + + + # errors + sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET",code=~"5.."}[6h])) + ) + / + sum(rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[6h])) + labels: + verb: read + record: apiserver_request:burnrate6h + - expr: |- + ( + ( + # too slow + sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1d])) + - + 
sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[1d])) + ) + + + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1d])) + ) + / + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1d])) + labels: + verb: write + record: apiserver_request:burnrate1d + - expr: |- + ( + ( + # too slow + sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1h])) + - + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[1h])) + ) + + + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[1h])) + ) + / + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[1h])) + labels: + verb: write + record: apiserver_request:burnrate1h + - expr: |- + ( + ( + # too slow + sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[2h])) + - + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[2h])) + ) + + + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[2h])) + ) + / + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[2h])) + labels: + verb: write + record: apiserver_request:burnrate2h + - expr: |- + ( + ( + # too slow + sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[30m])) + - + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[30m])) + ) + + + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[30m])) + ) + / + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[30m])) + labels: + verb: write + record: apiserver_request:burnrate30m + - expr: |- + ( + ( + # too slow + 
sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[3d])) + - + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[3d])) + ) + + + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[3d])) + ) + / + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[3d])) + labels: + verb: write + record: apiserver_request:burnrate3d + - expr: |- + ( + ( + # too slow + sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])) + - + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[5m])) + ) + + + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[5m])) + ) + / + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])) + labels: + verb: write + record: apiserver_request:burnrate5m + - expr: |- + ( + ( + # too slow + sum(rate(apiserver_request_duration_seconds_count{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[6h])) + - + sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",le="1"}[6h])) + ) + + + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE",code=~"5.."}[6h])) + ) + / + sum(rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[6h])) + labels: + verb: write + record: apiserver_request:burnrate6h + - expr: sum by (code,resource) (rate(apiserver_request_total{job="apiserver",verb=~"LIST|GET"}[5m])) + labels: + verb: read + record: code_resource:apiserver_request_total:rate5m + - expr: sum by (code,resource) (rate(apiserver_request_total{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m])) + labels: + verb: write + record: code_resource:apiserver_request_total:rate5m + - expr: histogram_quantile(0.99, sum by (le, resource) 
(rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"LIST|GET"}[5m]))) > 0 + labels: + quantile: '0.99' + verb: read + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile + - expr: histogram_quantile(0.99, sum by (le, resource) (rate(apiserver_request_duration_seconds_bucket{job="apiserver",verb=~"POST|PUT|PATCH|DELETE"}[5m]))) > 0 + labels: + quantile: '0.99' + verb: write + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile + - expr: histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod)) + labels: + quantile: '0.99' + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile + - expr: histogram_quantile(0.9, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod)) + labels: + quantile: '0.9' + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile + - expr: histogram_quantile(0.5, sum(rate(apiserver_request_duration_seconds_bucket{job="apiserver",subresource!="log",verb!~"LIST|WATCH|WATCHLIST|DELETECOLLECTION|PROXY|CONNECT"}[5m])) without(instance, pod)) + labels: + quantile: '0.5' + record: cluster_quantile:apiserver_request_duration_seconds:histogram_quantile +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-prometheus-general.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-prometheus-general.rules.yaml new file mode 100755 index 000000000..e54bee587 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-prometheus-general.rules.yaml @@ -0,0 +1,31 @@ +{{- /* +Generated from 
'kube-prometheus-general.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/prometheus-rules.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubePrometheusGeneral }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kube-prometheus-general.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kube-prometheus-general.rules + rules: + - expr: count without(instance, pod, node) (up == 1) + record: count:up1 + - expr: count without(instance, pod, node) (up == 0) + record: count:up0 +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-prometheus-node-recording.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-prometheus-node-recording.rules.yaml new file mode 100755 index 000000000..27271f1b5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-prometheus-node-recording.rules.yaml @@ -0,0 +1,39 @@ +{{- /* +Generated from 'kube-prometheus-node-recording.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/prometheus-rules.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubePrometheusNodeRecording }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kube-prometheus-node-recording.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . 
}} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kube-prometheus-node-recording.rules + rules: + - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[3m])) BY (instance) + record: instance:node_cpu:rate:sum + - expr: sum(rate(node_network_receive_bytes_total[3m])) BY (instance) + record: instance:node_network_receive_bytes:rate:sum + - expr: sum(rate(node_network_transmit_bytes_total[3m])) BY (instance) + record: instance:node_network_transmit_bytes:rate:sum + - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[5m])) WITHOUT (cpu, mode) / ON(instance) GROUP_LEFT() count(sum(node_cpu_seconds_total) BY (instance, cpu)) BY (instance) + record: instance:node_cpu:ratio + - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait",mode!="steal"}[5m])) + record: cluster:node_cpu:sum_rate5m + - expr: cluster:node_cpu_seconds_total:rate5m / count(sum(node_cpu_seconds_total) BY (instance, cpu)) + record: cluster:node_cpu:ratio +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-scheduler.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-scheduler.rules.yaml new file mode 100755 index 000000000..3c0ff31b0 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-scheduler.rules.yaml @@ -0,0 +1,65 @@ +{{- /* +Generated from 'kube-scheduler.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/kubernetes-prometheusRule.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubeScheduler }} +{{- if (include "exporter.kubeScheduler.enabled" .)}} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kube-scheduler.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kube-scheduler.rules + rules: + - expr: histogram_quantile(0.99, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) + labels: + quantile: '0.99' + record: cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile + - expr: histogram_quantile(0.99, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) + labels: + quantile: '0.99' + record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile + - expr: histogram_quantile(0.99, sum(rate(scheduler_binding_duration_seconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . 
}}"}[5m])) without(instance, pod)) + labels: + quantile: '0.99' + record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile + - expr: histogram_quantile(0.9, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) + labels: + quantile: '0.9' + record: cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile + - expr: histogram_quantile(0.9, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) + labels: + quantile: '0.9' + record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile + - expr: histogram_quantile(0.9, sum(rate(scheduler_binding_duration_seconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) + labels: + quantile: '0.9' + record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile + - expr: histogram_quantile(0.5, sum(rate(scheduler_e2e_scheduling_duration_seconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) + labels: + quantile: '0.5' + record: cluster_quantile:scheduler_e2e_scheduling_duration_seconds:histogram_quantile + - expr: histogram_quantile(0.5, sum(rate(scheduler_scheduling_algorithm_duration_seconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) + labels: + quantile: '0.5' + record: cluster_quantile:scheduler_scheduling_algorithm_duration_seconds:histogram_quantile + - expr: histogram_quantile(0.5, sum(rate(scheduler_binding_duration_seconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . 
}}"}[5m])) without(instance, pod)) + labels: + quantile: '0.5' + record: cluster_quantile:scheduler_binding_duration_seconds:histogram_quantile +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-state-metrics.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-state-metrics.yaml new file mode 100755 index 000000000..0fa5032ba --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kube-state-metrics.yaml @@ -0,0 +1,59 @@ +{{- /* +Generated from 'kube-state-metrics' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/prometheus-rules.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubeStateMetrics }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kube-state-metrics" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kube-state-metrics + rules: + - alert: KubeStateMetricsListErrors + annotations: + description: kube-state-metrics is experiencing errors at an elevated rate in list operations. This is likely causing it to not be able to expose metrics about Kubernetes objects correctly or at all. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubestatemetricslisterrors + summary: kube-state-metrics is experiencing errors in list operations. + expr: |- + (sum(rate(kube_state_metrics_list_total{job="kube-state-metrics",result="error"}[5m])) + / + sum(rate(kube_state_metrics_list_total{job="kube-state-metrics"}[5m]))) + > 0.01 + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeStateMetricsWatchErrors + annotations: + description: kube-state-metrics is experiencing errors at an elevated rate in watch operations. This is likely causing it to not be able to expose metrics about Kubernetes objects correctly or at all. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubestatemetricswatcherrors + summary: kube-state-metrics is experiencing errors in watch operations. 
+ expr: |- + (sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics",result="error"}[5m])) + / + sum(rate(kube_state_metrics_watch_total{job="kube-state-metrics"}[5m]))) + > 0.01 + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubelet.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubelet.rules.yaml new file mode 100755 index 000000000..8712b9ff5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubelet.rules.yaml @@ -0,0 +1,39 @@ +{{- /* +Generated from 'kubelet.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/kubernetes-prometheusRule.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.kubelet.enabled .Values.defaultRules.rules.kubelet }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kubelet.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kubelet.rules + rules: + - expr: histogram_quantile(0.99, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) + labels: + quantile: '0.99' + record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile + - expr: histogram_quantile(0.9, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) + labels: + quantile: '0.9' + record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile + - expr: histogram_quantile(0.5, sum(rate(kubelet_pleg_relist_duration_seconds_bucket[5m])) by (instance, le) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"}) + labels: + quantile: '0.5' + record: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-apps.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-apps.yaml new file mode 100755 index 000000000..198bbb845 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-apps.yaml @@ -0,0 +1,298 @@ +{{- /* +Generated from 'kubernetes-apps' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/kubernetes-prometheusRule.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubernetesApps }} +{{- $targetNamespace := .Values.defaultRules.appNamespacesTarget }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kubernetes-apps" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kubernetes-apps + rules: + - alert: KubePodCrashLooping + annotations: + description: Pod {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}} ({{`{{`}} $labels.container {{`}}`}}) is restarting {{`{{`}} printf "%.2f" $value {{`}}`}} times / 10 minutes. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubepodcrashlooping + summary: Pod is crash looping. 
+ expr: rate(kube_pod_container_status_restarts_total{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}[10m]) * 60 * 5 > 0 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubePodNotReady + annotations: + description: Pod {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}} has been in a non-ready state for longer than 15 minutes. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubepodnotready + summary: Pod has been in a non-ready state for more than 15 minutes. + expr: |- + sum by (namespace, pod) ( + max by(namespace, pod) ( + kube_pod_status_phase{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}", phase=~"Pending|Unknown"} + ) * on(namespace, pod) group_left(owner_kind) topk by(namespace, pod) ( + 1, max by(namespace, pod, owner_kind) (kube_pod_owner{owner_kind!="Job"}) + ) + ) > 0 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeDeploymentGenerationMismatch + annotations: + description: Deployment generation for {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.deployment {{`}}`}} does not match, this indicates that the Deployment has failed but has not been rolled back. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubedeploymentgenerationmismatch + summary: Deployment generation mismatch due to possible roll-back + expr: |- + kube_deployment_status_observed_generation{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + != + kube_deployment_metadata_generation{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeDeploymentReplicasMismatch + annotations: + description: Deployment {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.deployment {{`}}`}} has not matched the expected number of replicas for longer than 15 minutes. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubedeploymentreplicasmismatch + summary: Deployment has not matched the expected number of replicas. + expr: |- + ( + kube_deployment_spec_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + != + kube_deployment_status_replicas_available{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + ) and ( + changes(kube_deployment_status_replicas_updated{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}[5m]) + == + 0 + ) + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeStatefulSetReplicasMismatch + annotations: + description: StatefulSet {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.statefulset {{`}}`}} has not matched the expected number of replicas for longer than 15 minutes. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubestatefulsetreplicasmismatch + summary: Deployment has not matched the expected number of replicas. 
+ expr: |- + ( + kube_statefulset_status_replicas_ready{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + != + kube_statefulset_status_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + ) and ( + changes(kube_statefulset_status_replicas_updated{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}[5m]) + == + 0 + ) + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeStatefulSetGenerationMismatch + annotations: + description: StatefulSet generation for {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.statefulset {{`}}`}} does not match, this indicates that the StatefulSet has failed but has not been rolled back. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubestatefulsetgenerationmismatch + summary: StatefulSet generation mismatch due to possible roll-back + expr: |- + kube_statefulset_status_observed_generation{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + != + kube_statefulset_metadata_generation{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeStatefulSetUpdateNotRolledOut + annotations: + description: StatefulSet {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.statefulset {{`}}`}} update has not been rolled out. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubestatefulsetupdatenotrolledout + summary: StatefulSet update has not been rolled out. 
+ expr: |- + ( + max without (revision) ( + kube_statefulset_status_current_revision{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + unless + kube_statefulset_status_update_revision{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + ) + * + ( + kube_statefulset_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + != + kube_statefulset_status_replicas_updated{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + ) + ) and ( + changes(kube_statefulset_status_replicas_updated{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}[5m]) + == + 0 + ) + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeDaemonSetRolloutStuck + annotations: + description: DaemonSet {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.daemonset {{`}}`}} has not finished or progressed for at least 15 minutes. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubedaemonsetrolloutstuck + summary: DaemonSet rollout is stuck. 
+ expr: |- + ( + ( + kube_daemonset_status_current_number_scheduled{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + != + kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + ) or ( + kube_daemonset_status_number_misscheduled{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + != + 0 + ) or ( + kube_daemonset_updated_number_scheduled{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + != + kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + ) or ( + kube_daemonset_status_number_available{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + != + kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + ) + ) and ( + changes(kube_daemonset_updated_number_scheduled{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}[5m]) + == + 0 + ) + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeContainerWaiting + annotations: + description: Pod {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}} container {{`{{`}} $labels.container{{`}}`}} has been in waiting state for longer than 1 hour. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubecontainerwaiting + summary: Pod container waiting longer than 1 hour + expr: sum by (namespace, pod, container) (kube_pod_container_status_waiting_reason{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}) > 0 + for: 1h + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeDaemonSetNotScheduled + annotations: + description: '{{`{{`}} $value {{`}}`}} Pods of DaemonSet {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.daemonset {{`}}`}} are not scheduled.' + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubedaemonsetnotscheduled + summary: DaemonSet pods are not scheduled. + expr: |- + kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + - + kube_daemonset_status_current_number_scheduled{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} > 0 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeDaemonSetMisScheduled + annotations: + description: '{{`{{`}} $value {{`}}`}} Pods of DaemonSet {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.daemonset {{`}}`}} are running where they are not supposed to run.' + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubedaemonsetmisscheduled + summary: DaemonSet pods are misscheduled. 
+ expr: kube_daemonset_status_number_misscheduled{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} > 0 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeJobCompletion + annotations: + description: Job {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.job_name {{`}}`}} is taking more than 12 hours to complete. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubejobcompletion + summary: Job did not complete in time + expr: kube_job_spec_completions{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} - kube_job_status_succeeded{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} > 0 + for: 12h + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeJobFailed + annotations: + description: Job {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.job_name {{`}}`}} failed to complete. Removing failed job after investigation should clear this alert. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubejobfailed + summary: Job failed to complete. + expr: kube_job_failed{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} > 0 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeHpaReplicasMismatch + annotations: + description: HPA {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.hpa {{`}}`}} has not matched the desired number of replicas for longer than 15 minutes. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubehpareplicasmismatch + summary: HPA has not matched descired number of replicas. 
+ expr: |- + (kube_hpa_status_desired_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + != + kube_hpa_status_current_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}) + and + (kube_hpa_status_current_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + > + kube_hpa_spec_min_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}) + and + (kube_hpa_status_current_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + < + kube_hpa_spec_max_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}) + and + changes(kube_hpa_status_current_replicas[15m]) == 0 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeHpaMaxedOut + annotations: + description: HPA {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.hpa {{`}}`}} has been running at max replicas for longer than 15 minutes. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubehpamaxedout + summary: HPA is running at max replicas + expr: |- + kube_hpa_status_current_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + == + kube_hpa_spec_max_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-resources.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-resources.yaml new file mode 100755 index 000000000..898f8eed2 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-resources.yaml @@ -0,0 +1,159 @@ +{{- /* +Generated from 'kubernetes-resources' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/kubernetes-prometheusRule.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubernetesResources }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kubernetes-resources" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kubernetes-resources + rules: + - alert: KubeCPUOvercommit + annotations: + description: Cluster has overcommitted CPU resource requests for Pods and cannot tolerate node failure. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubecpuovercommit + summary: Cluster has overcommitted CPU resource requests. + expr: |- + sum(namespace:kube_pod_container_resource_requests_cpu_cores:sum{}) + / + sum(kube_node_status_allocatable_cpu_cores) + > + (count(kube_node_status_allocatable_cpu_cores)-1) / count(kube_node_status_allocatable_cpu_cores) + for: 5m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeMemoryOvercommit + annotations: + description: Cluster has overcommitted memory resource requests for Pods and cannot tolerate node failure. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubememoryovercommit + summary: Cluster has overcommitted memory resource requests. + expr: |- + sum(namespace:kube_pod_container_resource_requests_memory_bytes:sum{}) + / + sum(kube_node_status_allocatable_memory_bytes) + > + (count(kube_node_status_allocatable_memory_bytes)-1) + / + count(kube_node_status_allocatable_memory_bytes) + for: 5m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeCPUQuotaOvercommit + annotations: + description: Cluster has overcommitted CPU resource requests for Namespaces. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubecpuquotaovercommit + summary: Cluster has overcommitted CPU resource requests. + expr: |- + sum(kube_resourcequota{job="kube-state-metrics", type="hard", resource="cpu"}) + / + sum(kube_node_status_allocatable_cpu_cores) + > 1.5 + for: 5m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeMemoryQuotaOvercommit + annotations: + description: Cluster has overcommitted memory resource requests for Namespaces. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubememoryquotaovercommit + summary: Cluster has overcommitted memory resource requests. + expr: |- + sum(kube_resourcequota{job="kube-state-metrics", type="hard", resource="memory"}) + / + sum(kube_node_status_allocatable_memory_bytes{job="kube-state-metrics"}) + > 1.5 + for: 5m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeQuotaAlmostFull + annotations: + description: Namespace {{`{{`}} $labels.namespace {{`}}`}} is using {{`{{`}} $value | humanizePercentage {{`}}`}} of its {{`{{`}} $labels.resource {{`}}`}} quota. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubequotaalmostfull + summary: Namespace quota is going to be full. 
+ expr: |- + kube_resourcequota{job="kube-state-metrics", type="used"} + / ignoring(instance, job, type) + (kube_resourcequota{job="kube-state-metrics", type="hard"} > 0) + > 0.9 < 1 + for: 15m + labels: + severity: info +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeQuotaFullyUsed + annotations: + description: Namespace {{`{{`}} $labels.namespace {{`}}`}} is using {{`{{`}} $value | humanizePercentage {{`}}`}} of its {{`{{`}} $labels.resource {{`}}`}} quota. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubequotafullyused + summary: Namespace quota is fully used. + expr: |- + kube_resourcequota{job="kube-state-metrics", type="used"} + / ignoring(instance, job, type) + (kube_resourcequota{job="kube-state-metrics", type="hard"} > 0) + == 1 + for: 15m + labels: + severity: info +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeQuotaExceeded + annotations: + description: Namespace {{`{{`}} $labels.namespace {{`}}`}} is using {{`{{`}} $value | humanizePercentage {{`}}`}} of its {{`{{`}} $labels.resource {{`}}`}} quota. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubequotaexceeded + summary: Namespace quota has exceeded the limits. + expr: |- + kube_resourcequota{job="kube-state-metrics", type="used"} + / ignoring(instance, job, type) + (kube_resourcequota{job="kube-state-metrics", type="hard"} > 0) + > 1 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: CPUThrottlingHigh + annotations: + description: '{{`{{`}} $value | humanizePercentage {{`}}`}} throttling of CPU in namespace {{`{{`}} $labels.namespace {{`}}`}} for container {{`{{`}} $labels.container {{`}}`}} in pod {{`{{`}} $labels.pod {{`}}`}}.' 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-cputhrottlinghigh + summary: Processes experience elevated CPU throttling. + expr: |- + sum(increase(container_cpu_cfs_throttled_periods_total{container!="", }[5m])) by (container, pod, namespace) + / + sum(increase(container_cpu_cfs_periods_total{}[5m])) by (container, pod, namespace) + > ( 25 / 100 ) + for: 15m + labels: + severity: info +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-storage.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-storage.yaml new file mode 100755 index 000000000..527e6e308 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-storage.yaml @@ -0,0 +1,75 @@ +{{- /* +Generated from 'kubernetes-storage' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/kubernetes-prometheusRule.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubernetesStorage }} +{{- $targetNamespace := .Values.defaultRules.appNamespacesTarget }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kubernetes-storage" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . 
}} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kubernetes-storage + rules: + - alert: KubePersistentVolumeFillingUp + annotations: + description: The PersistentVolume claimed by {{`{{`}} $labels.persistentvolumeclaim {{`}}`}} in Namespace {{`{{`}} $labels.namespace {{`}}`}} is only {{`{{`}} $value | humanizePercentage {{`}}`}} free. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubepersistentvolumefillingup + summary: PersistentVolume is filling up. + expr: |- + kubelet_volume_stats_available_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}", metrics_path="/metrics"} + / + kubelet_volume_stats_capacity_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}", metrics_path="/metrics"} + < 0.03 + for: 1m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubePersistentVolumeFillingUp + annotations: + description: Based on recent sampling, the PersistentVolume claimed by {{`{{`}} $labels.persistentvolumeclaim {{`}}`}} in Namespace {{`{{`}} $labels.namespace {{`}}`}} is expected to fill up within four days. Currently {{`{{`}} $value | humanizePercentage {{`}}`}} is available. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubepersistentvolumefillingup + summary: PersistentVolume is filling up. 
+ expr: |- + ( + kubelet_volume_stats_available_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}", metrics_path="/metrics"} + / + kubelet_volume_stats_capacity_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}", metrics_path="/metrics"} + ) < 0.15 + and + predict_linear(kubelet_volume_stats_available_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}", metrics_path="/metrics"}[6h], 4 * 24 * 3600) < 0 + for: 1h + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubePersistentVolumeErrors + annotations: + description: The persistent volume {{`{{`}} $labels.persistentvolume {{`}}`}} has status {{`{{`}} $labels.phase {{`}}`}}. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubepersistentvolumeerrors + summary: PersistentVolume is having issues with provisioning. + expr: kube_persistentvolume_status_phase{phase=~"Failed|Pending",job="kube-state-metrics"} > 0 + for: 5m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system-apiserver.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system-apiserver.yaml new file mode 100755 index 000000000..2ed298b35 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system-apiserver.yaml @@ -0,0 +1,98 @@ +{{- /* +Generated from 'kubernetes-system-apiserver' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/kubernetes-prometheusRule.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubernetesSystem }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kubernetes-system-apiserver" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kubernetes-system-apiserver + rules: + - alert: KubeClientCertificateExpiration + annotations: + description: A client certificate used to authenticate to the apiserver is expiring in less than 7.0 days. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeclientcertificateexpiration + summary: Client certificate is about to expire. 
+ expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and on(job) histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 604800 + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeClientCertificateExpiration + annotations: + description: A client certificate used to authenticate to the apiserver is expiring in less than 24.0 hours. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeclientcertificateexpiration + summary: Client certificate is about to expire. + expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and on(job) histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 86400 + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: AggregatedAPIErrors + annotations: + description: An aggregated API {{`{{`}} $labels.name {{`}}`}}/{{`{{`}} $labels.namespace {{`}}`}} has reported errors. It has appeared unavailable {{`{{`}} $value | humanize {{`}}`}} times averaged over the past 10m. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-aggregatedapierrors + summary: An aggregated API has reported errors. + expr: sum by(name, namespace)(increase(aggregator_unavailable_apiservice_count[10m])) > 4 + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: AggregatedAPIDown + annotations: + description: An aggregated API {{`{{`}} $labels.name {{`}}`}}/{{`{{`}} $labels.namespace {{`}}`}} has been only {{`{{`}} $value | humanize {{`}}`}}% available over the last 10m. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-aggregatedapidown + summary: An aggregated API is down. + expr: (1 - max by(name, namespace)(avg_over_time(aggregator_unavailable_apiservice[10m]))) * 100 < 85 + for: 5m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- if .Values.kubeApiServer.enabled }} + - alert: KubeAPIDown + annotations: + description: KubeAPI has disappeared from Prometheus target discovery. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapidown + summary: Target disappeared from Prometheus target discovery. + expr: absent(up{job="apiserver"} == 1) + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} + - alert: KubeAPITerminatedRequests + annotations: + description: The apiserver has terminated {{`{{`}} $value | humanizePercentage {{`}}`}} of its incoming requests. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapiterminatedrequests + summary: The apiserver has terminated {{`{{`}} $value | humanizePercentage {{`}}`}} of its incoming requests. 
+ expr: sum(rate(apiserver_request_terminations_total{job="apiserver"}[10m])) / ( sum(rate(apiserver_request_total{job="apiserver"}[10m])) + sum(rate(apiserver_request_terminations_total{job="apiserver"}[10m])) ) > 0.20 + for: 5m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system-controller-manager.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system-controller-manager.yaml new file mode 100755 index 000000000..bbb5f9e23 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system-controller-manager.yaml @@ -0,0 +1,43 @@ +{{- /* +Generated from 'kubernetes-system-controller-manager' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/kubernetes-prometheusRule.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create }} +{{- if (include "exporter.kubeControllerManager.enabled" .)}} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kubernetes-system-controller-manager" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kubernetes-system-controller-manager + rules: +{{- if (include "exporter.kubeControllerManager.enabled" .)}} + - alert: KubeControllerManagerDown + annotations: + description: KubeControllerManager has disappeared from Prometheus target discovery. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubecontrollermanagerdown + summary: Target disappeared from Prometheus target discovery. + expr: absent(up{job="{{ include "exporter.kubeControllerManager.jobName" . }}"} == 1) + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system-kubelet.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system-kubelet.yaml new file mode 100755 index 000000000..4d536ec2d --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system-kubelet.yaml @@ -0,0 +1,188 @@ +{{- /* +Generated from 'kubernetes-system-kubelet' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/kubernetes-prometheusRule.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubernetesSystem }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kubernetes-system-kubelet" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kubernetes-system-kubelet + rules: + - alert: KubeNodeNotReady + annotations: + description: '{{`{{`}} $labels.node {{`}}`}} has been unready for more than 15 minutes.' + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubenodenotready + summary: Node is not ready. + expr: kube_node_status_condition{job="kube-state-metrics",condition="Ready",status="true"} == 0 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeNodeUnreachable + annotations: + description: '{{`{{`}} $labels.node {{`}}`}} is unreachable and some workloads may be rescheduled.' + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubenodeunreachable + summary: Node is unreachable. 
+ expr: (kube_node_spec_taint{job="kube-state-metrics",key="node.kubernetes.io/unreachable",effect="NoSchedule"} unless ignoring(key,value) kube_node_spec_taint{job="kube-state-metrics",key=~"ToBeDeletedByClusterAutoscaler|cloud.google.com/impending-node-termination|aws-node-termination-handler/spot-itn"}) == 1 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeletTooManyPods + annotations: + description: Kubelet '{{`{{`}} $labels.node {{`}}`}}' is running at {{`{{`}} $value | humanizePercentage {{`}}`}} of its Pod capacity. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubelettoomanypods + summary: Kubelet is running at capacity. + expr: |- + count by(node) ( + (kube_pod_status_phase{job="kube-state-metrics",phase="Running"} == 1) * on(instance,pod,namespace,cluster) group_left(node) topk by(instance,pod,namespace,cluster) (1, kube_pod_info{job="kube-state-metrics"}) + ) + / + max by(node) ( + kube_node_status_capacity_pods{job="kube-state-metrics"} != 1 + ) > 0.95 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeNodeReadinessFlapping + annotations: + description: The readiness status of node {{`{{`}} $labels.node {{`}}`}} has changed {{`{{`}} $value {{`}}`}} times in the last 15 minutes. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubenodereadinessflapping + summary: Node readiness status is flapping. 
+ expr: sum(changes(kube_node_status_condition{status="true",condition="Ready"}[15m])) by (node) > 2 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeletPlegDurationHigh + annotations: + description: The Kubelet Pod Lifecycle Event Generator has a 99th percentile duration of {{`{{`}} $value {{`}}`}} seconds on node {{`{{`}} $labels.node {{`}}`}}. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeletplegdurationhigh + summary: Kubelet Pod Lifecycle Event Generator is taking too long to relist. + expr: node_quantile:kubelet_pleg_relist_duration_seconds:histogram_quantile{quantile="0.99"} >= 10 + for: 5m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeletPodStartUpLatencyHigh + annotations: + description: Kubelet Pod startup 99th percentile latency is {{`{{`}} $value {{`}}`}} seconds on node {{`{{`}} $labels.node {{`}}`}}. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeletpodstartuplatencyhigh + summary: Kubelet Pod startup latency is too high. + expr: histogram_quantile(0.99, sum(rate(kubelet_pod_worker_duration_seconds_bucket{job="kubelet", metrics_path="/metrics"}[5m])) by (instance, le)) * on(instance) group_left(node) kubelet_node_name{job="kubelet", metrics_path="/metrics"} > 60 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeletClientCertificateExpiration + annotations: + description: Client certificate for Kubelet on node {{`{{`}} $labels.node {{`}}`}} expires in {{`{{`}} $value | humanizeDuration {{`}}`}}. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeletclientcertificateexpiration + summary: Kubelet client certificate is about to expire. + expr: kubelet_certificate_manager_client_ttl_seconds < 604800 + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeletClientCertificateExpiration + annotations: + description: Client certificate for Kubelet on node {{`{{`}} $labels.node {{`}}`}} expires in {{`{{`}} $value | humanizeDuration {{`}}`}}. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeletclientcertificateexpiration + summary: Kubelet client certificate is about to expire. + expr: kubelet_certificate_manager_client_ttl_seconds < 86400 + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeletServerCertificateExpiration + annotations: + description: Server certificate for Kubelet on node {{`{{`}} $labels.node {{`}}`}} expires in {{`{{`}} $value | humanizeDuration {{`}}`}}. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeletservercertificateexpiration + summary: Kubelet server certificate is about to expire. + expr: kubelet_certificate_manager_server_ttl_seconds < 604800 + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeletServerCertificateExpiration + annotations: + description: Server certificate for Kubelet on node {{`{{`}} $labels.node {{`}}`}} expires in {{`{{`}} $value | humanizeDuration {{`}}`}}. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeletservercertificateexpiration + summary: Kubelet server certificate is about to expire. 
+ expr: kubelet_certificate_manager_server_ttl_seconds < 86400 + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeletClientCertificateRenewalErrors + annotations: + description: Kubelet on node {{`{{`}} $labels.node {{`}}`}} has failed to renew its client certificate ({{`{{`}} $value | humanize {{`}}`}} errors in the last 5 minutes). + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeletclientcertificaterenewalerrors + summary: Kubelet has failed to renew its client certificate. + expr: increase(kubelet_certificate_manager_client_expiration_renew_errors[5m]) > 0 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeletServerCertificateRenewalErrors + annotations: + description: Kubelet on node {{`{{`}} $labels.node {{`}}`}} has failed to renew its server certificate ({{`{{`}} $value | humanize {{`}}`}} errors in the last 5 minutes). + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeletservercertificaterenewalerrors + summary: Kubelet has failed to renew its server certificate. + expr: increase(kubelet_server_expiration_renew_errors[5m]) > 0 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- if .Values.prometheusOperator.kubeletService.enabled }} + - alert: KubeletDown + annotations: + description: Kubelet has disappeared from Prometheus target discovery. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeletdown + summary: Target disappeared from Prometheus target discovery. 
+ expr: absent(up{job="kubelet", metrics_path="/metrics"} == 1) + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system-scheduler.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system-scheduler.yaml new file mode 100755 index 000000000..f4f5589f4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system-scheduler.yaml @@ -0,0 +1,43 @@ +{{- /* +Generated from 'kubernetes-system-scheduler' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/kubernetes-prometheusRule.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubeScheduler }} +{{- if (include "exporter.kubeScheduler.enabled" .)}} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kubernetes-system-scheduler" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kubernetes-system-scheduler + rules: +{{- if (include "exporter.kubeScheduler.enabled" .)}} + - alert: KubeSchedulerDown + annotations: + description: KubeScheduler has disappeared from Prometheus target discovery. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeschedulerdown + summary: Target disappeared from Prometheus target discovery. + expr: absent(up{job="{{ include "exporter.kubeScheduler.jobName" . }}"} == 1) + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system.yaml new file mode 100755 index 000000000..52230c62e --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/kubernetes-system.yaml @@ -0,0 +1,55 @@ +{{- /* +Generated from 'kubernetes-system' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/kubernetes-prometheusRule.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubernetesSystem }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kubernetes-system" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kubernetes-system + rules: + - alert: KubeVersionMismatch + annotations: + description: There are {{`{{`}} $value {{`}}`}} different semantic versions of Kubernetes components running. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeversionmismatch + summary: Different semantic versions of Kubernetes components running. + expr: count(count by (gitVersion) (label_replace(kubernetes_build_info{job!~"kube-dns|coredns"},"gitVersion","$1","gitVersion","(v[0-9]*.[0-9]*).*"))) > 1 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeClientErrors + annotations: + description: Kubernetes API server client '{{`{{`}} $labels.job {{`}}`}}/{{`{{`}} $labels.instance {{`}}`}}' is experiencing {{`{{`}} $value | humanizePercentage {{`}}`}} errors.' 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeclienterrors + summary: Kubernetes API server client is experiencing errors. + expr: |- + (sum(rate(rest_client_requests_total{code=~"5.."}[5m])) by (instance, job) + / + sum(rate(rest_client_requests_total[5m])) by (instance, job)) + > 0.01 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/node-exporter.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/node-exporter.rules.yaml new file mode 100755 index 000000000..ddb737647 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/node-exporter.rules.yaml @@ -0,0 +1,79 @@ +{{- /* +Generated from 'node-exporter.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/node-exporter-prometheusRule.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.node }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "node-exporter.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: node-exporter.rules + rules: + - expr: |- + count without (cpu) ( + count without (mode) ( + node_cpu_seconds_total{job="node-exporter"} + ) + ) + record: instance:node_num_cpu:sum + - expr: |- + 1 - avg without (cpu, mode) ( + rate(node_cpu_seconds_total{job="node-exporter", mode="idle"}[1m]) + ) + record: instance:node_cpu_utilisation:rate1m + - expr: |- + ( + node_load1{job="node-exporter"} + / + instance:node_num_cpu:sum{job="node-exporter"} + ) + record: instance:node_load1_per_cpu:ratio + - expr: |- + 1 - ( + node_memory_MemAvailable_bytes{job="node-exporter"} + / + node_memory_MemTotal_bytes{job="node-exporter"} + ) + record: instance:node_memory_utilisation:ratio + - expr: rate(node_vmstat_pgmajfault{job="node-exporter"}[1m]) + record: instance:node_vmstat_pgmajfault:rate1m + - expr: rate(node_disk_io_time_seconds_total{job="node-exporter", device=~"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+"}[1m]) + record: instance_device:node_disk_io_time_seconds:rate1m + - expr: rate(node_disk_io_time_weighted_seconds_total{job="node-exporter", device=~"mmcblk.p.+|nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+|dasd.+"}[1m]) + record: instance_device:node_disk_io_time_weighted_seconds:rate1m + - expr: |- + sum without (device) ( + rate(node_network_receive_bytes_total{job="node-exporter", device!="lo"}[1m]) + ) + record: instance:node_network_receive_bytes_excluding_lo:rate1m + - expr: |- + sum without (device) ( + rate(node_network_transmit_bytes_total{job="node-exporter", device!="lo"}[1m]) + ) + record: instance:node_network_transmit_bytes_excluding_lo:rate1m + - expr: |- + sum without (device) ( + rate(node_network_receive_drop_total{job="node-exporter", device!="lo"}[1m]) + ) + record: 
instance:node_network_receive_drop_excluding_lo:rate1m + - expr: |- + sum without (device) ( + rate(node_network_transmit_drop_total{job="node-exporter", device!="lo"}[1m]) + ) + record: instance:node_network_transmit_drop_excluding_lo:rate1m +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/node-exporter.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/node-exporter.yaml new file mode 100755 index 000000000..3be497c1f --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/node-exporter.yaml @@ -0,0 +1,262 @@ +{{- /* +Generated from 'node-exporter' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/node-exporter-prometheusRule.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.node }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "node-exporter" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: node-exporter + rules: + - alert: NodeFilesystemSpaceFillingUp + annotations: + description: Filesystem on {{`{{`}} $labels.device {{`}}`}} at {{`{{`}} $labels.instance {{`}}`}} has only {{`{{`}} printf "%.2f" $value {{`}}`}}% available space left and is filling up. + summary: Filesystem is predicted to run out of space within the next 24 hours. + expr: |- + ( + node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 40 + and + predict_linear(node_filesystem_avail_bytes{job="node-exporter",fstype!=""}[6h], 24*60*60) < 0 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: NodeFilesystemSpaceFillingUp + annotations: + description: Filesystem on {{`{{`}} $labels.device {{`}}`}} at {{`{{`}} $labels.instance {{`}}`}} has only {{`{{`}} printf "%.2f" $value {{`}}`}}% available space left and is filling up fast. + summary: Filesystem is predicted to run out of space within the next 4 hours. 
+ expr: |- + ( + node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 15 + and + predict_linear(node_filesystem_avail_bytes{job="node-exporter",fstype!=""}[6h], 4*60*60) < 0 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: NodeFilesystemAlmostOutOfSpace + annotations: + description: Filesystem on {{`{{`}} $labels.device {{`}}`}} at {{`{{`}} $labels.instance {{`}}`}} has only {{`{{`}} printf "%.2f" $value {{`}}`}}% available space left. + summary: Filesystem has less than 5% space left. + expr: |- + ( + node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 5 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: NodeFilesystemAlmostOutOfSpace + annotations: + description: Filesystem on {{`{{`}} $labels.device {{`}}`}} at {{`{{`}} $labels.instance {{`}}`}} has only {{`{{`}} printf "%.2f" $value {{`}}`}}% available space left. + summary: Filesystem has less than 3% space left. 
+ expr: |- + ( + node_filesystem_avail_bytes{job="node-exporter",fstype!=""} / node_filesystem_size_bytes{job="node-exporter",fstype!=""} * 100 < 3 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: NodeFilesystemFilesFillingUp + annotations: + description: Filesystem on {{`{{`}} $labels.device {{`}}`}} at {{`{{`}} $labels.instance {{`}}`}} has only {{`{{`}} printf "%.2f" $value {{`}}`}}% available inodes left and is filling up. + summary: Filesystem is predicted to run out of inodes within the next 24 hours. + expr: |- + ( + node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 40 + and + predict_linear(node_filesystem_files_free{job="node-exporter",fstype!=""}[6h], 24*60*60) < 0 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: NodeFilesystemFilesFillingUp + annotations: + description: Filesystem on {{`{{`}} $labels.device {{`}}`}} at {{`{{`}} $labels.instance {{`}}`}} has only {{`{{`}} printf "%.2f" $value {{`}}`}}% available inodes left and is filling up fast. + summary: Filesystem is predicted to run out of inodes within the next 4 hours. 
+ expr: |- + ( + node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 20 + and + predict_linear(node_filesystem_files_free{job="node-exporter",fstype!=""}[6h], 4*60*60) < 0 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: NodeFilesystemAlmostOutOfFiles + annotations: + description: Filesystem on {{`{{`}} $labels.device {{`}}`}} at {{`{{`}} $labels.instance {{`}}`}} has only {{`{{`}} printf "%.2f" $value {{`}}`}}% available inodes left. + summary: Filesystem has less than 5% inodes left. + expr: |- + ( + node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 5 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: NodeFilesystemAlmostOutOfFiles + annotations: + description: Filesystem on {{`{{`}} $labels.device {{`}}`}} at {{`{{`}} $labels.instance {{`}}`}} has only {{`{{`}} printf "%.2f" $value {{`}}`}}% available inodes left. + summary: Filesystem has less than 3% inodes left. 
+ expr: |- + ( + node_filesystem_files_free{job="node-exporter",fstype!=""} / node_filesystem_files{job="node-exporter",fstype!=""} * 100 < 3 + and + node_filesystem_readonly{job="node-exporter",fstype!=""} == 0 + ) + for: 1h + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: NodeNetworkReceiveErrs + annotations: + description: '{{`{{`}} $labels.instance {{`}}`}} interface {{`{{`}} $labels.device {{`}}`}} has encountered {{`{{`}} printf "%.0f" $value {{`}}`}} receive errors in the last two minutes.' + summary: Network interface is reporting many receive errors. + expr: rate(node_network_receive_errs_total[2m]) / rate(node_network_receive_packets_total[2m]) > 0.01 + for: 1h + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: NodeNetworkTransmitErrs + annotations: + description: '{{`{{`}} $labels.instance {{`}}`}} interface {{`{{`}} $labels.device {{`}}`}} has encountered {{`{{`}} printf "%.0f" $value {{`}}`}} transmit errors in the last two minutes.' + summary: Network interface is reporting many transmit errors. + expr: rate(node_network_transmit_errs_total[2m]) / rate(node_network_transmit_packets_total[2m]) > 0.01 + for: 1h + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: NodeHighNumberConntrackEntriesUsed + annotations: + description: '{{`{{`}} $value | humanizePercentage {{`}}`}} of conntrack entries are used.' + summary: Number of conntrack are getting close to the limit. 
+ expr: (node_nf_conntrack_entries / node_nf_conntrack_entries_limit) > 0.75 + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: NodeTextFileCollectorScrapeError + annotations: + description: Node Exporter text file collector failed to scrape. + summary: Node Exporter text file collector failed to scrape. + expr: node_textfile_scrape_error{job="node-exporter"} == 1 + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: NodeClockSkewDetected + annotations: + message: Clock on {{`{{`}} $labels.instance {{`}}`}} is out of sync by more than 300s. Ensure NTP is configured correctly on this host. + summary: Clock skew detected. + expr: |- + ( + node_timex_offset_seconds > 0.05 + and + deriv(node_timex_offset_seconds[5m]) >= 0 + ) + or + ( + node_timex_offset_seconds < -0.05 + and + deriv(node_timex_offset_seconds[5m]) <= 0 + ) + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: NodeClockNotSynchronising + annotations: + message: Clock on {{`{{`}} $labels.instance {{`}}`}} is not synchronising. Ensure NTP is configured on this host. + summary: Clock not synchronising. + expr: |- + min_over_time(node_timex_sync_status[5m]) == 0 + and + node_timex_maxerror_seconds >= 16 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: NodeRAIDDegraded + annotations: + description: RAID array '{{`{{`}} $labels.device {{`}}`}}' on {{`{{`}} $labels.instance {{`}}`}} is in degraded state due to one or more disks failures. Number of spare drives is insufficient to fix issue automatically. 
+ summary: RAID Array is degraded + expr: node_md_disks_required - ignoring (state) (node_md_disks{state="active"}) > 0 + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: NodeRAIDDiskFailure + annotations: + description: At least one device in RAID array on {{`{{`}} $labels.instance {{`}}`}} failed. Array '{{`{{`}} $labels.device {{`}}`}}' needs attention and possibly a disk swap. + summary: Failed device in RAID array + expr: node_md_disks{state="fail"} > 0 + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/node-network.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/node-network.yaml new file mode 100755 index 000000000..9a6955ae9 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/node-network.yaml @@ -0,0 +1,37 @@ +{{- /* +Generated from 'node-network' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/prometheus-rules.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.network }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) 
"node-network" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: node-network + rules: + - alert: NodeNetworkInterfaceFlapping + annotations: + message: Network interface "{{`{{`}} $labels.device {{`}}`}}" changing it's up status often on node-exporter {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}}" + expr: changes(node_network_up{job="node-exporter",device!~"veth.+"}[2m]) > 2 + for: 2m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/node.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/node.rules.yaml new file mode 100755 index 000000000..c841e6f6e --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/node.rules.yaml @@ -0,0 +1,51 @@ +{{- /* +Generated from 'node.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/kubernetes-prometheusRule.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.node }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "node.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: node.rules + rules: + - expr: |- + topk by(namespace, pod) (1, + max by (node, namespace, pod) ( + label_replace(kube_pod_info{job="kube-state-metrics",node!=""}, "pod", "$1", "pod", "(.*)") + )) + record: 'node_namespace_pod:kube_pod_info:' + - expr: |- + count by (cluster, node) (sum by (node, cpu) ( + node_cpu_seconds_total{job="node-exporter"} + * on (namespace, pod) group_left(node) + node_namespace_pod:kube_pod_info: + )) + record: node:node_num_cpu:sum + - expr: |- + sum( + node_memory_MemAvailable_bytes{job="node-exporter"} or + ( + node_memory_Buffers_bytes{job="node-exporter"} + + node_memory_Cached_bytes{job="node-exporter"} + + node_memory_MemFree_bytes{job="node-exporter"} + + node_memory_Slab_bytes{job="node-exporter"} + ) + ) by (cluster) + record: :node_memory_MemAvailable_bytes:sum +{{- end }} \ No newline at end of file diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/prometheus-operator.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/prometheus-operator.yaml new file mode 100755 index 000000000..d1c1f6545 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/prometheus-operator.yaml @@ -0,0 +1,113 @@ +{{- /* +Generated from 'prometheus-operator' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/prometheus-rules.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.prometheusOperator }} +{{- $operatorJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "operator" }} +{{- $namespace := printf "%s" (include "kube-prometheus-stack.namespace" .) }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "prometheus-operator" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: prometheus-operator + rules: + - alert: PrometheusOperatorListErrors + annotations: + description: Errors while performing List operations in controller {{`{{`}}$labels.controller{{`}}`}} in {{`{{`}}$labels.namespace{{`}}`}} namespace. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-prometheusoperatorlisterrors + summary: Errors while performing list operations in controller. + expr: (sum by (controller,namespace) (rate(prometheus_operator_list_operations_failed_total{job="{{ $operatorJob }}",namespace="{{ $namespace }}"}[10m])) / sum by (controller,namespace) (rate(prometheus_operator_list_operations_total{job="{{ $operatorJob }}",namespace="{{ $namespace }}"}[10m]))) > 0.4 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusOperatorWatchErrors + annotations: + description: Errors while performing watch operations in controller {{`{{`}}$labels.controller{{`}}`}} in {{`{{`}}$labels.namespace{{`}}`}} namespace. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-prometheusoperatorwatcherrors + summary: Errors while performing watch operations in controller. 
+ expr: (sum by (controller,namespace) (rate(prometheus_operator_watch_operations_failed_total{job="{{ $operatorJob }}",namespace="{{ $namespace }}"}[10m])) / sum by (controller,namespace) (rate(prometheus_operator_watch_operations_total{job="{{ $operatorJob }}",namespace="{{ $namespace }}"}[10m]))) > 0.4 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusOperatorSyncFailed + annotations: + description: Controller {{`{{`}} $labels.controller {{`}}`}} in {{`{{`}} $labels.namespace {{`}}`}} namespace fails to reconcile {{`{{`}} $value {{`}}`}} objects. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-prometheusoperatorsyncfailed + summary: Last controller reconciliation failed + expr: min_over_time(prometheus_operator_syncs{status="failed",job="{{ $operatorJob }}",namespace="{{ $namespace }}"}[5m]) > 0 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusOperatorReconcileErrors + annotations: + description: '{{`{{`}} $value | humanizePercentage {{`}}`}} of reconciling operations failed for {{`{{`}} $labels.controller {{`}}`}} controller in {{`{{`}} $labels.namespace {{`}}`}} namespace.' + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-prometheusoperatorreconcileerrors + summary: Errors while reconciling controller. 
+ expr: (sum by (controller,namespace) (rate(prometheus_operator_reconcile_errors_total{job="{{ $operatorJob }}",namespace="{{ $namespace }}"}[5m]))) / (sum by (controller,namespace) (rate(prometheus_operator_reconcile_operations_total{job="{{ $operatorJob }}",namespace="{{ $namespace }}"}[5m]))) > 0.1 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusOperatorNodeLookupErrors + annotations: + description: Errors while reconciling Prometheus in {{`{{`}} $labels.namespace {{`}}`}} Namespace. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-prometheusoperatornodelookuperrors + summary: Errors while reconciling Prometheus. + expr: rate(prometheus_operator_node_address_lookup_errors_total{job="{{ $operatorJob }}",namespace="{{ $namespace }}"}[5m]) > 0.1 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusOperatorNotReady + annotations: + description: Prometheus operator in {{`{{`}} $labels.namespace {{`}}`}} namespace isn't ready to reconcile {{`{{`}} $labels.controller {{`}}`}} resources. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-prometheusoperatornotready + summary: Prometheus operator not ready + expr: min by(namespace, controller) (max_over_time(prometheus_operator_ready{job="{{ $operatorJob }}",namespace="{{ $namespace }}"}[5m]) == 0) + for: 5m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusOperatorRejectedResources + annotations: + description: Prometheus operator in {{`{{`}} $labels.namespace {{`}}`}} namespace rejected {{`{{`}} printf "%0.0f" $value {{`}}`}} {{`{{`}} $labels.controller {{`}}`}}/{{`{{`}} $labels.resource {{`}}`}} resources. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-prometheusoperatorrejectedresources + summary: Resources rejected by Prometheus operator + expr: min_over_time(prometheus_operator_managed_resources{state="rejected",job="{{ $operatorJob }}",namespace="{{ $namespace }}"}[5m]) > 0 + for: 5m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/prometheus.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/prometheus.yaml new file mode 100755 index 000000000..c9c805eea --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules-1.14/prometheus.yaml @@ -0,0 +1,258 @@ +{{- /* +Generated from 'prometheus' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/master/manifests/prometheus-rules.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.14.0-0" $kubeTargetVersion) (semverCompare "<9.9.9-9" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.prometheus }} +{{- $prometheusJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "prometheus" }} +{{- $namespace := printf "%s" (include "kube-prometheus-stack.namespace" .) }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "prometheus" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: prometheus + rules: + - alert: PrometheusBadConfig + annotations: + description: Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} has failed to reload its configuration. + summary: Failed Prometheus configuration reload. + expr: |- + # Without max_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. 
+ max_over_time(prometheus_config_last_reload_successful{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) == 0 + for: 10m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusNotificationQueueRunningFull + annotations: + description: Alert notification queue of Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} is running full. + summary: Prometheus alert notification queue predicted to run full in less than 30m. + expr: |- + # Without min_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. + ( + predict_linear(prometheus_notifications_queue_length{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m], 60 * 30) + > + min_over_time(prometheus_notifications_queue_capacity{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) + ) + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusErrorSendingAlertsToSomeAlertmanagers + annotations: + description: '{{`{{`}} printf "%.1f" $value {{`}}`}}% errors while sending alerts from Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} to Alertmanager {{`{{`}}$labels.alertmanager{{`}}`}}.' + summary: Prometheus has encountered more than 1% errors sending alerts to a specific Alertmanager. 
+ expr: |- + ( + rate(prometheus_notifications_errors_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) + / + rate(prometheus_notifications_sent_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) + ) + * 100 + > 1 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusErrorSendingAlertsToAnyAlertmanager + annotations: + description: '{{`{{`}} printf "%.1f" $value {{`}}`}}% minimum errors while sending alerts from Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} to any Alertmanager.' + summary: Prometheus encounters more than 3% errors sending alerts to any Alertmanager. + expr: |- + min without(alertmanager) ( + rate(prometheus_notifications_errors_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) + / + rate(prometheus_notifications_sent_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) + ) + * 100 + > 3 + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusNotConnectedToAlertmanagers + annotations: + description: Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} is not connected to any Alertmanagers. + summary: Prometheus is not connected to any Alertmanagers. + expr: |- + # Without max_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. 
+ max_over_time(prometheus_notifications_alertmanagers_discovered{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) < 1 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusTSDBReloadsFailing + annotations: + description: Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} has detected {{`{{`}}$value | humanize{{`}}`}} reload failures over the last 3h. + summary: Prometheus has issues reloading blocks from disk. + expr: increase(prometheus_tsdb_reloads_failures_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[3h]) > 0 + for: 4h + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusTSDBCompactionsFailing + annotations: + description: Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} has detected {{`{{`}}$value | humanize{{`}}`}} compaction failures over the last 3h. + summary: Prometheus has issues compacting blocks. + expr: increase(prometheus_tsdb_compactions_failed_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[3h]) > 0 + for: 4h + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusNotIngestingSamples + annotations: + description: Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} is not ingesting samples. + summary: Prometheus is not ingesting samples. 
+ expr: rate(prometheus_tsdb_head_samples_appended_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) <= 0 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusDuplicateTimestamps + annotations: + description: Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} is dropping {{`{{`}} printf "%.4g" $value {{`}}`}} samples/s with different values but duplicated timestamp. + summary: Prometheus is dropping samples with duplicate timestamps. + expr: rate(prometheus_target_scrapes_sample_duplicate_timestamp_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) > 0 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusOutOfOrderTimestamps + annotations: + description: Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} is dropping {{`{{`}} printf "%.4g" $value {{`}}`}} samples/s with timestamps arriving out of order. + summary: Prometheus drops samples with out-of-order timestamps. + expr: rate(prometheus_target_scrapes_sample_out_of_order_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) > 0 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusRemoteStorageFailures + annotations: + description: Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} failed to send {{`{{`}} printf "%.1f" $value {{`}}`}}% of the samples to {{`{{`}} $labels.remote_name{{`}}`}}:{{`{{`}} $labels.url {{`}}`}} + summary: Prometheus fails to send samples to remote storage. 
+ expr: |- + ( + rate(prometheus_remote_storage_failed_samples_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) + / + ( + rate(prometheus_remote_storage_failed_samples_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) + + + rate(prometheus_remote_storage_succeeded_samples_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) + ) + ) + * 100 + > 1 + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusRemoteWriteBehind + annotations: + description: Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} remote write is {{`{{`}} printf "%.1f" $value {{`}}`}}s behind for {{`{{`}} $labels.remote_name{{`}}`}}:{{`{{`}} $labels.url {{`}}`}}. + summary: Prometheus remote write is behind. + expr: |- + # Without max_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. 
+ ( + max_over_time(prometheus_remote_storage_highest_timestamp_in_seconds{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) + - on(job, instance) group_right + max_over_time(prometheus_remote_storage_queue_highest_sent_timestamp_seconds{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) + ) + > 120 + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusRemoteWriteDesiredShards + annotations: + description: Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} remote write desired shards calculation wants to run {{`{{`}} $value {{`}}`}} shards for queue {{`{{`}} $labels.remote_name{{`}}`}}:{{`{{`}} $labels.url {{`}}`}}, which is more than the max of {{`{{`}} printf `prometheus_remote_storage_shards_max{instance="%s",job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}` $labels.instance | query | first | value {{`}}`}}. + summary: Prometheus remote write desired shards calculation wants to run more than configured max shards. + expr: |- + # Without max_over_time, failed scrapes could create false negatives, see + # https://www.robustperception.io/alerting-on-gauges-in-prometheus-2-0 for details. + ( + max_over_time(prometheus_remote_storage_shards_desired{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) + > + max_over_time(prometheus_remote_storage_shards_max{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) + ) + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusRuleFailures + annotations: + description: Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} has failed to evaluate {{`{{`}} printf "%.0f" $value {{`}}`}} rules in the last 5m. + summary: Prometheus is failing rule evaluations. 
+ expr: increase(prometheus_rule_evaluation_failures_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) > 0 + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusMissingRuleEvaluations + annotations: + description: Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} has missed {{`{{`}} printf "%.0f" $value {{`}}`}} rule group evaluations in the last 5m. + summary: Prometheus is missing rule evaluations due to slow rule group evaluation. + expr: increase(prometheus_rule_group_iterations_missed_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) > 0 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusTargetLimitHit + annotations: + description: Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} has dropped {{`{{`}} printf "%.0f" $value {{`}}`}} targets because the number of targets exceeded the configured target_limit. + summary: Prometheus has dropped targets because some scrape configs have exceeded the targets limit. 
+ expr: increase(prometheus_target_scrape_pool_exceeded_target_limit_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) > 0 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/alertmanager.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/alertmanager.rules.yaml new file mode 100755 index 000000000..71159849c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/alertmanager.rules.yaml @@ -0,0 +1,63 @@ +{{- /* +Generated from 'alertmanager.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.alertmanager }} +{{- $operatorJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "operator" }} +{{- $alertmanagerJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "alertmanager" }} +{{- $namespace := printf "%s" (include "kube-prometheus-stack.namespace" .) }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "alertmanager.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . 
}} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: alertmanager.rules + rules: + - alert: AlertmanagerConfigInconsistent + annotations: + message: The configuration of the instances of the Alertmanager cluster `{{`{{`}}$labels.service{{`}}`}}` are out of sync. + expr: count_values("config_hash", alertmanager_config_hash{job="{{ $alertmanagerJob }}",namespace="{{ $namespace }}"}) BY (service) / ON(service) GROUP_LEFT() label_replace(max(prometheus_operator_spec_replicas{job="{{ $operatorJob }}",namespace="{{ $namespace }}",controller="alertmanager"}) by (name, job, namespace, controller), "service", "$1", "name", "(.*)") != 1 + for: 5m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: AlertmanagerFailedReload + annotations: + message: Reloading Alertmanager's configuration has failed for {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod{{`}}`}}. + expr: alertmanager_config_last_reload_successful{job="{{ $alertmanagerJob }}",namespace="{{ $namespace }}"} == 0 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: AlertmanagerMembersInconsistent + annotations: + message: Alertmanager has not found all other members of the cluster. 
+ expr: |- + alertmanager_cluster_members{job="{{ $alertmanagerJob }}",namespace="{{ $namespace }}"} + != on (service) GROUP_LEFT() + count by (service) (alertmanager_cluster_members{job="{{ $alertmanagerJob }}",namespace="{{ $namespace }}"}) + for: 5m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/etcd.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/etcd.yaml new file mode 100755 index 000000000..048410bc3 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/etcd.yaml @@ -0,0 +1,181 @@ +{{- /* +Generated from 'etcd' group from https://raw.githubusercontent.com/etcd-io/website/master/content/docs/v3.4.0/op-guide/etcd3_alert.rules.yml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.etcd }} +{{- if (include "exporter.kubeEtcd.enabled" .)}} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "etcd" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: etcd + rules: + - alert: etcdInsufficientMembers + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": insufficient members ({{`{{`}} $value {{`}}`}}).' + expr: sum(up{job=~".*etcd.*"} == bool 1) by (job) < ((count(up{job=~".*etcd.*"}) by (job) + 1) / 2) + for: 3m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdNoLeader + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": member {{`{{`}} $labels.instance {{`}}`}} has no leader.' + expr: etcd_server_has_leader{job=~".*etcd.*"} == 0 + for: 1m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdHighNumberOfLeaderChanges + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": instance {{`{{`}} $labels.instance {{`}}`}} has seen {{`{{`}} $value {{`}}`}} leader changes within the last hour.' + expr: rate(etcd_server_leader_changes_seen_total{job=~".*etcd.*"}[15m]) > 3 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdHighNumberOfFailedGRPCRequests + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": {{`{{`}} $value {{`}}`}}% of requests for {{`{{`}} $labels.grpc_method {{`}}`}} failed on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' 
+ expr: |- + 100 * sum(rate(grpc_server_handled_total{job=~".*etcd.*", grpc_code!="OK"}[5m])) BY (job, instance, grpc_service, grpc_method) + / + sum(rate(grpc_server_handled_total{job=~".*etcd.*"}[5m])) BY (job, instance, grpc_service, grpc_method) + > 1 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdHighNumberOfFailedGRPCRequests + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": {{`{{`}} $value {{`}}`}}% of requests for {{`{{`}} $labels.grpc_method {{`}}`}} failed on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' + expr: |- + 100 * sum(rate(grpc_server_handled_total{job=~".*etcd.*", grpc_code!="OK"}[5m])) BY (job, instance, grpc_service, grpc_method) + / + sum(rate(grpc_server_handled_total{job=~".*etcd.*"}[5m])) BY (job, instance, grpc_service, grpc_method) + > 5 + for: 5m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdGRPCRequestsSlow + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": gRPC requests to {{`{{`}} $labels.grpc_method {{`}}`}} are taking {{`{{`}} $value {{`}}`}}s on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' + expr: |- + histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job=~".*etcd.*", grpc_type="unary"}[5m])) by (job, instance, grpc_service, grpc_method, le)) + > 0.15 + for: 10m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdMemberCommunicationSlow + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": member communication with {{`{{`}} $labels.To {{`}}`}} is taking {{`{{`}} $value {{`}}`}}s on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' 
+ expr: |- + histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket{job=~".*etcd.*"}[5m])) + > 0.15 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdHighNumberOfFailedProposals + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": {{`{{`}} $value {{`}}`}} proposal failures within the last hour on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' + expr: rate(etcd_server_proposals_failed_total{job=~".*etcd.*"}[15m]) > 5 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdHighFsyncDurations + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": 99th percentile fync durations are {{`{{`}} $value {{`}}`}}s on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' + expr: |- + histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket{job=~".*etcd.*"}[5m])) + > 0.5 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdHighCommitDurations + annotations: + message: 'etcd cluster "{{`{{`}} $labels.job {{`}}`}}": 99th percentile commit durations {{`{{`}} $value {{`}}`}}s on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' 
+ expr: |- + histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket{job=~".*etcd.*"}[5m])) + > 0.25 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdHighNumberOfFailedHTTPRequests + annotations: + message: '{{`{{`}} $value {{`}}`}}% of requests for {{`{{`}} $labels.method {{`}}`}} failed on etcd instance {{`{{`}} $labels.instance {{`}}`}}' + expr: |- + sum(rate(etcd_http_failed_total{job=~".*etcd.*", code!="404"}[5m])) BY (method) / sum(rate(etcd_http_received_total{job=~".*etcd.*"}[5m])) + BY (method) > 0.01 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdHighNumberOfFailedHTTPRequests + annotations: + message: '{{`{{`}} $value {{`}}`}}% of requests for {{`{{`}} $labels.method {{`}}`}} failed on etcd instance {{`{{`}} $labels.instance {{`}}`}}.' + expr: |- + sum(rate(etcd_http_failed_total{job=~".*etcd.*", code!="404"}[5m])) BY (method) / sum(rate(etcd_http_received_total{job=~".*etcd.*"}[5m])) + BY (method) > 0.05 + for: 10m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: etcdHTTPRequestsSlow + annotations: + message: etcd instance {{`{{`}} $labels.instance {{`}}`}} HTTP requests to {{`{{`}} $labels.method {{`}}`}} are slow. 
+ expr: |- + histogram_quantile(0.99, rate(etcd_http_successful_duration_seconds_bucket[5m])) + > 0.15 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/general.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/general.rules.yaml new file mode 100755 index 000000000..cde6feb5c --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/general.rules.yaml @@ -0,0 +1,56 @@ +{{- /* +Generated from 'general.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.general }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "general.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: general.rules + rules: + - alert: TargetDown + annotations: + message: '{{`{{`}} $value {{`}}`}}% of the {{`{{`}} $labels.job {{`}}`}} targets are down.' + expr: 100 * (count(up == 0) BY (job) / count(up) BY (job)) > 10 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: Watchdog + annotations: + message: 'This is an alert meant to ensure that the entire alerting pipeline is functional. + + This alert is always firing, therefore it should always be firing in Alertmanager + + and always fire against a receiver. There are integrations with various notification + + mechanisms that send a notification when this alert is not firing. For example the + + "DeadMansSnitch" integration in PagerDuty. + + ' + expr: vector(1) + labels: + severity: none +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/k8s.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/k8s.rules.yaml new file mode 100755 index 000000000..08aa7fe2b --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/k8s.rules.yaml @@ -0,0 +1,83 @@ +{{- /* +Generated from 'k8s.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.k8s }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "k8s.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: k8s.rules + rules: + - expr: sum(rate(container_cpu_usage_seconds_total{job="kubelet", image!="", container_name!=""}[5m])) by (namespace) + record: namespace:container_cpu_usage_seconds_total:sum_rate + - expr: sum(container_memory_usage_bytes{job="kubelet", image!="", container_name!=""}) by (namespace) + record: namespace:container_memory_usage_bytes:sum + - expr: |- + sum by (namespace, pod_name, container_name) ( + rate(container_cpu_usage_seconds_total{job="kubelet", image!="", container_name!=""}[5m]) + ) + record: namespace_pod_name_container_name:container_cpu_usage_seconds_total:sum_rate + - expr: |- + sum by(namespace) ( + kube_pod_container_resource_requests_memory_bytes{job="kube-state-metrics"} + * on (endpoint, instance, job, namespace, pod, service) + group_left(phase) (kube_pod_status_phase{phase=~"^(Pending|Running)$"} == 1) + ) + record: 
namespace_name:kube_pod_container_resource_requests_memory_bytes:sum + - expr: |- + sum by (namespace) ( + kube_pod_container_resource_requests_cpu_cores{job="kube-state-metrics"} + * on (endpoint, instance, job, namespace, pod, service) + group_left(phase) (kube_pod_status_phase{phase=~"^(Pending|Running)$"} == 1) + ) + record: namespace_name:kube_pod_container_resource_requests_cpu_cores:sum + - expr: |- + sum( + label_replace( + label_replace( + kube_pod_owner{job="kube-state-metrics", owner_kind="ReplicaSet"}, + "replicaset", "$1", "owner_name", "(.*)" + ) * on(replicaset, namespace) group_left(owner_name) kube_replicaset_owner{job="kube-state-metrics"}, + "workload", "$1", "owner_name", "(.*)" + ) + ) by (namespace, workload, pod) + labels: + workload_type: deployment + record: mixin_pod_workload + - expr: |- + sum( + label_replace( + kube_pod_owner{job="kube-state-metrics", owner_kind="DaemonSet"}, + "workload", "$1", "owner_name", "(.*)" + ) + ) by (namespace, workload, pod) + labels: + workload_type: daemonset + record: mixin_pod_workload + - expr: |- + sum( + label_replace( + kube_pod_owner{job="kube-state-metrics", owner_kind="StatefulSet"}, + "workload", "$1", "owner_name", "(.*)" + ) + ) by (namespace, workload, pod) + labels: + workload_type: statefulset + record: mixin_pod_workload +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kube-apiserver.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kube-apiserver.rules.yaml new file mode 100755 index 000000000..e3a929692 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kube-apiserver.rules.yaml @@ -0,0 +1,39 @@ +{{- /* +Generated from 'kube-apiserver.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.kubeApiServer.enabled .Values.defaultRules.rules.kubeApiserver }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kube-apiserver.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kube-apiserver.rules + rules: + - expr: histogram_quantile(0.99, sum(rate(apiserver_request_latencies_bucket{job="apiserver"}[5m])) without(instance, pod)) / 1e+06 + labels: + quantile: '0.99' + record: cluster_quantile:apiserver_request_latencies:histogram_quantile + - expr: histogram_quantile(0.9, sum(rate(apiserver_request_latencies_bucket{job="apiserver"}[5m])) without(instance, pod)) / 1e+06 + labels: + quantile: '0.9' + record: cluster_quantile:apiserver_request_latencies:histogram_quantile + - expr: histogram_quantile(0.5, sum(rate(apiserver_request_latencies_bucket{job="apiserver"}[5m])) without(instance, pod)) / 1e+06 + labels: + quantile: '0.5' + record: cluster_quantile:apiserver_request_latencies:histogram_quantile +{{- end }} \ No newline at end of file diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kube-prometheus-node-alerting.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kube-prometheus-node-alerting.rules.yaml new file mode 100755 index 000000000..a8d5400cb --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kube-prometheus-node-alerting.rules.yaml @@ -0,0 +1,47 @@ +{{- /* +Generated from 'kube-prometheus-node-alerting.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubePrometheusNodeAlerting }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kube-prometheus-node-alerting.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kube-prometheus-node-alerting.rules + rules: + - alert: NodeDiskRunningFull + annotations: + message: Device {{`{{`}} $labels.device {{`}}`}} of node-exporter {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}} will be full within the next 24 hours. + expr: '(node:node_filesystem_usage: > 0.85) and (predict_linear(node:node_filesystem_avail:[6h], 3600 * 24) < 0)' + for: 30m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: NodeDiskRunningFull + annotations: + message: Device {{`{{`}} $labels.device {{`}}`}} of node-exporter {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}} will be full within the next 2 hours. 
+ expr: '(node:node_filesystem_usage: > 0.85) and (predict_linear(node:node_filesystem_avail:[30m], 3600 * 2) < 0)' + for: 10m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kube-prometheus-node-recording.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kube-prometheus-node-recording.rules.yaml new file mode 100755 index 000000000..87f072fd0 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kube-prometheus-node-recording.rules.yaml @@ -0,0 +1,41 @@ +{{- /* +Generated from 'kube-prometheus-node-recording.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubePrometheusNodeRecording }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kube-prometheus-node-recording.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kube-prometheus-node-recording.rules + rules: + - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait"}[3m])) BY (instance) + record: instance:node_cpu:rate:sum + - expr: sum((node_filesystem_size_bytes{mountpoint="/"} - node_filesystem_free_bytes{mountpoint="/"})) BY (instance) + record: instance:node_filesystem_usage:sum + - expr: sum(rate(node_network_receive_bytes_total[3m])) BY (instance) + record: instance:node_network_receive_bytes:rate:sum + - expr: sum(rate(node_network_transmit_bytes_total[3m])) BY (instance) + record: instance:node_network_transmit_bytes:rate:sum + - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait"}[5m])) WITHOUT (cpu, mode) / ON(instance) GROUP_LEFT() count(sum(node_cpu_seconds_total) BY (instance, cpu)) BY (instance) + record: instance:node_cpu:ratio + - expr: sum(rate(node_cpu_seconds_total{mode!="idle",mode!="iowait"}[5m])) + record: cluster:node_cpu:sum_rate5m + - expr: cluster:node_cpu_seconds_total:rate5m / count(sum(node_cpu_seconds_total) BY (instance, cpu)) + record: cluster:node_cpu:ratio +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kube-scheduler.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kube-scheduler.rules.yaml new file mode 100755 index 000000000..e8de8ed6e --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kube-scheduler.rules.yaml @@ -0,0 +1,65 @@ +{{- /* +Generated from 'kube-scheduler.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml +Do not change 
in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubeScheduler }} +{{- if (include "exporter.kubeScheduler.enabled" .)}} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kube-scheduler.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kube-scheduler.rules + rules: + - expr: histogram_quantile(0.99, sum(rate(scheduler_e2e_scheduling_latency_microseconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) / 1e+06 + labels: + quantile: '0.99' + record: cluster_quantile:scheduler_e2e_scheduling_latency:histogram_quantile + - expr: histogram_quantile(0.99, sum(rate(scheduler_scheduling_algorithm_latency_microseconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) / 1e+06 + labels: + quantile: '0.99' + record: cluster_quantile:scheduler_scheduling_algorithm_latency:histogram_quantile + - expr: histogram_quantile(0.99, sum(rate(scheduler_binding_latency_microseconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . 
}}"}[5m])) without(instance, pod)) / 1e+06 + labels: + quantile: '0.99' + record: cluster_quantile:scheduler_binding_latency:histogram_quantile + - expr: histogram_quantile(0.9, sum(rate(scheduler_e2e_scheduling_latency_microseconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) / 1e+06 + labels: + quantile: '0.9' + record: cluster_quantile:scheduler_e2e_scheduling_latency:histogram_quantile + - expr: histogram_quantile(0.9, sum(rate(scheduler_scheduling_algorithm_latency_microseconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) / 1e+06 + labels: + quantile: '0.9' + record: cluster_quantile:scheduler_scheduling_algorithm_latency:histogram_quantile + - expr: histogram_quantile(0.9, sum(rate(scheduler_binding_latency_microseconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) / 1e+06 + labels: + quantile: '0.9' + record: cluster_quantile:scheduler_binding_latency:histogram_quantile + - expr: histogram_quantile(0.5, sum(rate(scheduler_e2e_scheduling_latency_microseconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) / 1e+06 + labels: + quantile: '0.5' + record: cluster_quantile:scheduler_e2e_scheduling_latency:histogram_quantile + - expr: histogram_quantile(0.5, sum(rate(scheduler_scheduling_algorithm_latency_microseconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . }}"}[5m])) without(instance, pod)) / 1e+06 + labels: + quantile: '0.5' + record: cluster_quantile:scheduler_scheduling_algorithm_latency:histogram_quantile + - expr: histogram_quantile(0.5, sum(rate(scheduler_binding_latency_microseconds_bucket{job="{{ include "exporter.kubeScheduler.jobName" . 
}}"}[5m])) without(instance, pod)) / 1e+06 + labels: + quantile: '0.5' + record: cluster_quantile:scheduler_binding_latency:histogram_quantile +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-absent.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-absent.yaml new file mode 100755 index 000000000..85d27cc77 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-absent.yaml @@ -0,0 +1,159 @@ +{{- /* +Generated from 'kubernetes-absent' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubernetesAbsent }} +{{- $operatorJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "operator" }} +{{- $prometheusJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "prometheus" }} +{{- $alertmanagerJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "alertmanager" }} +{{- $namespace := printf "%s" (include "kube-prometheus-stack.namespace" .) }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kubernetes-absent" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kubernetes-absent + rules: +{{- if .Values.alertmanager.enabled }} + - alert: AlertmanagerDown + annotations: + message: Alertmanager has disappeared from Prometheus target discovery. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-alertmanagerdown + expr: absent(up{job="{{ $alertmanagerJob }}",namespace="{{ $namespace }}"} == 1) + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} +{{- if .Values.kubeDns.enabled }} + - alert: CoreDNSDown + annotations: + message: CoreDNS has disappeared from Prometheus target discovery. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-corednsdown + expr: absent(up{job="kube-dns"} == 1) + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} +{{- if .Values.kubeApiServer.enabled }} + - alert: KubeAPIDown + annotations: + message: KubeAPI has disappeared from Prometheus target discovery. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapidown + expr: absent(up{job="apiserver"} == 1) + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} +{{- if (include "exporter.kubeControllerManager.enabled" .)}} + - alert: KubeControllerManagerDown + annotations: + message: KubeControllerManager has disappeared from Prometheus target discovery. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubecontrollermanagerdown + expr: absent(up{job="{{ include "exporter.kubeControllerManager.jobName" . }}"} == 1) + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} +{{- if (include "exporter.kubeScheduler.enabled" .)}} + - alert: KubeSchedulerDown + annotations: + message: KubeScheduler has disappeared from Prometheus target discovery. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeschedulerdown + expr: absent(up{job="{{ include "exporter.kubeScheduler.jobName" . }}"} == 1) + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} +{{- if .Values.kubeStateMetrics.enabled }} + - alert: KubeStateMetricsDown + annotations: + message: KubeStateMetrics has disappeared from Prometheus target discovery. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubestatemetricsdown + expr: absent(up{job="kube-state-metrics"} == 1) + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} +{{- if .Values.prometheusOperator.kubeletService.enabled }} + - alert: KubeletDown + annotations: + message: Kubelet has disappeared from Prometheus target discovery. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeletdown + expr: absent(up{job="kubelet"} == 1) + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} +{{- if .Values.nodeExporter.enabled }} + - alert: NodeExporterDown + annotations: + message: NodeExporter has disappeared from Prometheus target discovery. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-nodeexporterdown + expr: absent(up{job="node-exporter"} == 1) + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} + - alert: PrometheusDown + annotations: + message: Prometheus has disappeared from Prometheus target discovery. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-prometheusdown + expr: absent(up{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"} == 1) + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- if .Values.prometheusOperator.enabled }} + - alert: PrometheusOperatorDown + annotations: + message: PrometheusOperator has disappeared from Prometheus target discovery. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-prometheusoperatordown + expr: absent(up{job="{{ $operatorJob }}",namespace="{{ $namespace }}"} == 1) + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-apps.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-apps.yaml new file mode 100755 index 000000000..e7a41ca2a --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-apps.yaml @@ -0,0 +1,200 @@ +{{- /* +Generated from 'kubernetes-apps' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubernetesApps }} +{{- $targetNamespace := .Values.defaultRules.appNamespacesTarget }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kubernetes-apps" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kubernetes-apps + rules: + - alert: KubePodCrashLooping + annotations: + message: Pod {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}} ({{`{{`}} $labels.container {{`}}`}}) is restarting {{`{{`}} printf "%.2f" $value {{`}}`}} times / 5 minutes. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubepodcrashlooping + expr: rate(kube_pod_container_status_restarts_total{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"}[15m]) * 60 * 5 > 0 + for: 1h + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubePodNotReady + annotations: + message: Pod {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}} has been in a non-ready state for longer than an hour. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubepodnotready + expr: sum by (namespace, pod) (kube_pod_status_phase{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}", phase=~"Pending|Unknown"}) > 0 + for: 1h + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeDeploymentGenerationMismatch + annotations: + message: Deployment generation for {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.deployment {{`}}`}} does not match, this indicates that the Deployment has failed but has not been rolled back. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubedeploymentgenerationmismatch + expr: |- + kube_deployment_status_observed_generation{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + != + kube_deployment_metadata_generation{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeDeploymentReplicasMismatch + annotations: + message: Deployment {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.deployment {{`}}`}} has not matched the expected number of replicas for longer than an hour. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubedeploymentreplicasmismatch + expr: |- + kube_deployment_spec_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + != + kube_deployment_status_replicas_available{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + for: 1h + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeStatefulSetReplicasMismatch + annotations: + message: StatefulSet {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.statefulset {{`}}`}} has not matched the expected number of replicas for longer than 15 minutes. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubestatefulsetreplicasmismatch + expr: |- + kube_statefulset_status_replicas_ready{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + != + kube_statefulset_status_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeStatefulSetGenerationMismatch + annotations: + message: StatefulSet generation for {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.statefulset {{`}}`}} does not match, this indicates that the StatefulSet has failed but has not been rolled back. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubestatefulsetgenerationmismatch + expr: |- + kube_statefulset_status_observed_generation{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + != + kube_statefulset_metadata_generation{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeStatefulSetUpdateNotRolledOut + annotations: + message: StatefulSet {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.statefulset {{`}}`}} update has not been rolled out. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubestatefulsetupdatenotrolledout + expr: |- + max without (revision) ( + kube_statefulset_status_current_revision{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + unless + kube_statefulset_status_update_revision{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + ) + * + ( + kube_statefulset_replicas{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + != + kube_statefulset_status_replicas_updated{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + ) + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeDaemonSetRolloutStuck + annotations: + message: Only {{`{{`}} $value {{`}}`}}% of the desired Pods of DaemonSet {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.daemonset {{`}}`}} are scheduled and ready. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubedaemonsetrolloutstuck + expr: |- + kube_daemonset_status_number_ready{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + / + kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} * 100 < 100 + for: 15m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeDaemonSetNotScheduled + annotations: + message: '{{`{{`}} $value {{`}}`}} Pods of DaemonSet {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.daemonset {{`}}`}} are not scheduled.' + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubedaemonsetnotscheduled + expr: |- + kube_daemonset_status_desired_number_scheduled{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} + - + kube_daemonset_status_current_number_scheduled{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} > 0 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeDaemonSetMisScheduled + annotations: + message: '{{`{{`}} $value {{`}}`}} Pods of DaemonSet {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.daemonset {{`}}`}} are running where they are not supposed to run.' + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubedaemonsetmisscheduled + expr: kube_daemonset_status_number_misscheduled{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} > 0 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeCronJobRunning + annotations: + message: CronJob {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.cronjob {{`}}`}} is taking more than 1h to complete. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubecronjobrunning + expr: time() - kube_cronjob_next_schedule_time{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} > 3600 + for: 1h + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeJobCompletion + annotations: + message: Job {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.job_name {{`}}`}} is taking more than one hour to complete. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubejobcompletion + expr: kube_job_spec_completions{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} - kube_job_status_succeeded{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} > 0 + for: 1h + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeJobFailed + annotations: + message: Job {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.job_name {{`}}`}} failed to complete. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubejobfailed + expr: kube_job_status_failed{job="kube-state-metrics", namespace=~"{{ $targetNamespace }}"} > 0 + for: 1h + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-resources.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-resources.yaml new file mode 100755 index 000000000..b34b442f3 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-resources.yaml @@ -0,0 +1,121 @@ +{{- /* +Generated from 'kubernetes-resources' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubernetesResources }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kubernetes-resources" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kubernetes-resources + rules: + - alert: KubeCPUOvercommit + annotations: + message: Cluster has overcommitted CPU resource requests for Pods and cannot tolerate node failure. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubecpuovercommit + expr: |- + sum(namespace_name:kube_pod_container_resource_requests_cpu_cores:sum) + / + sum(node:node_num_cpu:sum) + > + (count(node:node_num_cpu:sum)-1) / count(node:node_num_cpu:sum) + for: 5m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeMemOvercommit + annotations: + message: Cluster has overcommitted memory resource requests for Pods and cannot tolerate node failure. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubememovercommit + expr: |- + sum(namespace_name:kube_pod_container_resource_requests_memory_bytes:sum) + / + sum(node_memory_MemTotal_bytes) + > + (count(node:node_num_cpu:sum)-1) + / + count(node:node_num_cpu:sum) + for: 5m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeCPUOvercommit + annotations: + message: Cluster has overcommitted CPU resource requests for Namespaces. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubecpuovercommit + expr: |- + sum(kube_resourcequota{job="kube-state-metrics", type="hard", resource="cpu"}) + / + sum(node:node_num_cpu:sum) + > 1.5 + for: 5m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeMemOvercommit + annotations: + message: Cluster has overcommitted memory resource requests for Namespaces. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubememovercommit + expr: |- + sum(kube_resourcequota{job="kube-state-metrics", type="hard", resource="memory"}) + / + sum(node_memory_MemTotal_bytes{job="node-exporter"}) + > 1.5 + for: 5m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeQuotaExceeded + annotations: + message: Namespace {{`{{`}} $labels.namespace {{`}}`}} is using {{`{{`}} printf "%0.0f" $value {{`}}`}}% of its {{`{{`}} $labels.resource {{`}}`}} quota. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubequotaexceeded + expr: |- + 100 * kube_resourcequota{job="kube-state-metrics", type="used"} + / ignoring(instance, job, type) + (kube_resourcequota{job="kube-state-metrics", type="hard"} > 0) + > 90 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: CPUThrottlingHigh + annotations: + message: '{{`{{`}} printf "%0.0f" $value {{`}}`}}% throttling of CPU in namespace {{`{{`}} $labels.namespace {{`}}`}} for container {{`{{`}} $labels.container_name {{`}}`}} in pod {{`{{`}} $labels.pod_name {{`}}`}}.' 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-cputhrottlinghigh + expr: |- + 100 * sum(increase(container_cpu_cfs_throttled_periods_total{container_name!="", }[5m])) by (container_name, pod_name, namespace) + / + sum(increase(container_cpu_cfs_periods_total{}[5m])) by (container_name, pod_name, namespace) + > 25 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-storage.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-storage.yaml new file mode 100755 index 000000000..6469fffc5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-storage.yaml @@ -0,0 +1,72 @@ +{{- /* +Generated from 'kubernetes-storage' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubernetesStorage }} +{{- $targetNamespace := .Values.defaultRules.appNamespacesTarget }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kubernetes-storage" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . 
}} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kubernetes-storage + rules: + - alert: KubePersistentVolumeUsageCritical + annotations: + message: The PersistentVolume claimed by {{`{{`}} $labels.persistentvolumeclaim {{`}}`}} in Namespace {{`{{`}} $labels.namespace {{`}}`}} is only {{`{{`}} printf "%0.2f" $value {{`}}`}}% free. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubepersistentvolumeusagecritical + expr: |- + 100 * kubelet_volume_stats_available_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}"} + / + kubelet_volume_stats_capacity_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}"} + < 3 + for: 1m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubePersistentVolumeFullInFourDays + annotations: + message: Based on recent sampling, the PersistentVolume claimed by {{`{{`}} $labels.persistentvolumeclaim {{`}}`}} in Namespace {{`{{`}} $labels.namespace {{`}}`}} is expected to fill up within four days. Currently {{`{{`}} printf "%0.2f" $value {{`}}`}}% is available. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubepersistentvolumefullinfourdays + expr: |- + 100 * ( + kubelet_volume_stats_available_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}"} + / + kubelet_volume_stats_capacity_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}"} + ) < 15 + and + predict_linear(kubelet_volume_stats_available_bytes{job="kubelet", namespace=~"{{ $targetNamespace }}"}[6h], 4 * 24 * 3600) < 0 + for: 5m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubePersistentVolumeErrors + annotations: + message: The persistent volume {{`{{`}} $labels.persistentvolume {{`}}`}} has status {{`{{`}} $labels.phase {{`}}`}}. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubepersistentvolumeerrors + expr: kube_persistentvolume_status_phase{phase=~"Failed|Pending",job="kube-state-metrics"} > 0 + for: 5m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-system.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-system.yaml new file mode 100755 index 000000000..da232057b --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/kubernetes-system.yaml @@ -0,0 +1,184 @@ +{{- /* +Generated from 'kubernetes-system' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml +Do not change in-place! 
In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.kubernetesSystem }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "kubernetes-system" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: kubernetes-system + rules: + - alert: KubeNodeNotReady + annotations: + message: '{{`{{`}} $labels.node {{`}}`}} has been unready for more than an hour.' + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubenodenotready + expr: kube_node_status_condition{job="kube-state-metrics",condition="Ready",status="true"} == 0 + for: 1h + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeVersionMismatch + annotations: + message: There are {{`{{`}} $value {{`}}`}} different semantic versions of Kubernetes components running. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeversionmismatch + expr: count(count by (gitVersion) (label_replace(kubernetes_build_info{job!~"kube-dns|coredns"},"gitVersion","$1","gitVersion","(v[0-9]*.[0-9]*.[0-9]*).*"))) > 1 + for: 1h + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeClientErrors + annotations: + message: Kubernetes API server client '{{`{{`}} $labels.job {{`}}`}}/{{`{{`}} $labels.instance {{`}}`}}' is experiencing {{`{{`}} printf "%0.0f" $value {{`}}`}}% errors.' + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeclienterrors + expr: |- + (sum(rate(rest_client_requests_total{code=~"5.."}[5m])) by (instance, job) + / + sum(rate(rest_client_requests_total[5m])) by (instance, job)) + * 100 > 1 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeClientErrors + annotations: + message: Kubernetes API server client '{{`{{`}} $labels.job {{`}}`}}/{{`{{`}} $labels.instance {{`}}`}}' is experiencing {{`{{`}} printf "%0.0f" $value {{`}}`}} errors / second. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeclienterrors + expr: sum(rate(ksm_scrape_error_total{job="kube-state-metrics"}[5m])) by (instance, job) > 0.1 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeletTooManyPods + annotations: + message: Kubelet {{`{{`}} $labels.instance {{`}}`}} is running {{`{{`}} $value {{`}}`}} Pods, close to the limit of 110. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubelettoomanypods + expr: kubelet_running_pod_count{job="kubelet"} > 110 * 0.9 + for: 15m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeAPILatencyHigh + annotations: + message: The API server has a 99th percentile latency of {{`{{`}} $value {{`}}`}} seconds for {{`{{`}} $labels.verb {{`}}`}} {{`{{`}} $labels.resource {{`}}`}}. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapilatencyhigh + expr: cluster_quantile:apiserver_request_latencies:histogram_quantile{job="apiserver",quantile="0.99",subresource!="log",verb!~"^(?:LIST|WATCH|WATCHLIST|PROXY|CONNECT)$"} > 1 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeAPILatencyHigh + annotations: + message: The API server has a 99th percentile latency of {{`{{`}} $value {{`}}`}} seconds for {{`{{`}} $labels.verb {{`}}`}} {{`{{`}} $labels.resource {{`}}`}}. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapilatencyhigh + expr: cluster_quantile:apiserver_request_latencies:histogram_quantile{job="apiserver",quantile="0.99",subresource!="log",verb!~"^(?:LIST|WATCH|WATCHLIST|PROXY|CONNECT)$"} > 4 + for: 10m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeAPIErrorsHigh + annotations: + message: API server is returning errors for {{`{{`}} $value {{`}}`}}% of requests. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapierrorshigh + expr: |- + sum(rate(apiserver_request_count{job="apiserver",code=~"^(?:5..)$"}[5m])) + / + sum(rate(apiserver_request_count{job="apiserver"}[5m])) * 100 > 3 + for: 10m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeAPIErrorsHigh + annotations: + message: API server is returning errors for {{`{{`}} $value {{`}}`}}% of requests. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapierrorshigh + expr: |- + sum(rate(apiserver_request_count{job="apiserver",code=~"^(?:5..)$"}[5m])) + / + sum(rate(apiserver_request_count{job="apiserver"}[5m])) * 100 > 1 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeAPIErrorsHigh + annotations: + message: API server is returning errors for {{`{{`}} $value {{`}}`}}% of requests for {{`{{`}} $labels.verb {{`}}`}} {{`{{`}} $labels.resource {{`}}`}} {{`{{`}} $labels.subresource {{`}}`}}. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapierrorshigh + expr: |- + sum(rate(apiserver_request_count{job="apiserver",code=~"^(?:5..)$"}[5m])) by (resource,subresource,verb) + / + sum(rate(apiserver_request_count{job="apiserver"}[5m])) by (resource,subresource,verb) * 100 > 10 + for: 10m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeAPIErrorsHigh + annotations: + message: API server is returning errors for {{`{{`}} $value {{`}}`}}% of requests for {{`{{`}} $labels.verb {{`}}`}} {{`{{`}} $labels.resource {{`}}`}} {{`{{`}} $labels.subresource {{`}}`}}. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeapierrorshigh + expr: |- + sum(rate(apiserver_request_count{job="apiserver",code=~"^(?:5..)$"}[5m])) by (resource,subresource,verb) + / + sum(rate(apiserver_request_count{job="apiserver"}[5m])) by (resource,subresource,verb) * 100 > 5 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeClientCertificateExpiration + annotations: + message: A client certificate used to authenticate to the apiserver is expiring in less than 7.0 days. + runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeclientcertificateexpiration + expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 604800 + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: KubeClientCertificateExpiration + annotations: + message: A client certificate used to authenticate to the apiserver is expiring in less than 24.0 hours. 
+ runbook_url: {{ .Values.defaultRules.runbookUrl }}alert-name-kubeclientcertificateexpiration + expr: apiserver_client_certificate_expiration_seconds_count{job="apiserver"} > 0 and histogram_quantile(0.01, sum by (job, le) (rate(apiserver_client_certificate_expiration_seconds_bucket{job="apiserver"}[5m]))) < 86400 + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/node-network.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/node-network.yaml new file mode 100755 index 000000000..c75f1ae07 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/node-network.yaml @@ -0,0 +1,57 @@ +{{- /* +Generated from 'node-network' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.network }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "node-network" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: node-network + rules: + - alert: NetworkReceiveErrors + annotations: + message: Network interface "{{`{{`}} $labels.device {{`}}`}}" showing receive errors on node-exporter {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}}" + expr: rate(node_network_receive_errs_total{job="node-exporter",device!~"veth.+"}[2m]) > 0 + for: 2m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: NetworkTransmitErrors + annotations: + message: Network interface "{{`{{`}} $labels.device {{`}}`}}" showing transmit errors on node-exporter {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}}" + expr: rate(node_network_transmit_errs_total{job="node-exporter",device!~"veth.+"}[2m]) > 0 + for: 2m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: NodeNetworkInterfaceFlapping + annotations: + message: Network interface "{{`{{`}} $labels.device {{`}}`}}" changing it's up status often on node-exporter {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}}" + expr: changes(node_network_up{job="node-exporter",device!~"veth.+"}[2m]) > 2 + for: 2m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/node-time.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/node-time.yaml new 
file mode 100755 index 000000000..b7a2fc92f --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/node-time.yaml @@ -0,0 +1,37 @@ +{{- /* +Generated from 'node-time' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.time }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "node-time" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: node-time + rules: + - alert: ClockSkewDetected + annotations: + message: Clock skew detected on node-exporter {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod {{`}}`}}. Ensure NTP is configured correctly on this host. 
+ expr: abs(node_timex_offset_seconds{job="node-exporter"}) > 0.03 + for: 2m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/node.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/node.rules.yaml new file mode 100755 index 000000000..2bc7af3a9 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/node.rules.yaml @@ -0,0 +1,202 @@ +{{- /* +Generated from 'node.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.node }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "node.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: node.rules + rules: + - expr: sum(min(kube_pod_info) by (node)) + record: ':kube_pod_info_node_count:' + - expr: max(label_replace(kube_pod_info{job="kube-state-metrics"}, "pod", "$1", "pod", "(.*)")) by (node, namespace, pod) + record: 'node_namespace_pod:kube_pod_info:' + - expr: |- + count by (node) (sum by (node, cpu) ( + node_cpu_seconds_total{job="node-exporter"} + * on (namespace, pod) group_left(node) + node_namespace_pod:kube_pod_info: + )) + record: node:node_num_cpu:sum + - expr: 1 - avg(rate(node_cpu_seconds_total{job="node-exporter",mode="idle"}[1m])) + record: :node_cpu_utilisation:avg1m + - expr: |- + 1 - avg by (node) ( + rate(node_cpu_seconds_total{job="node-exporter",mode="idle"}[1m]) + * on (namespace, pod) group_left(node) + node_namespace_pod:kube_pod_info:) + record: node:node_cpu_utilisation:avg1m + - expr: |- + node:node_cpu_utilisation:avg1m + * + node:node_num_cpu:sum + / + scalar(sum(node:node_num_cpu:sum)) + record: node:cluster_cpu_utilisation:ratio + - expr: |- + sum(node_load1{job="node-exporter"}) + / + sum(node:node_num_cpu:sum) + record: ':node_cpu_saturation_load1:' + - expr: |- + sum by (node) ( + node_load1{job="node-exporter"} + * on (namespace, pod) group_left(node) + node_namespace_pod:kube_pod_info: + ) + / + node:node_num_cpu:sum + record: 'node:node_cpu_saturation_load1:' + - expr: |- + 1 - + sum(node_memory_MemFree_bytes{job="node-exporter"} + node_memory_Cached_bytes{job="node-exporter"} + node_memory_Buffers_bytes{job="node-exporter"}) + / + sum(node_memory_MemTotal_bytes{job="node-exporter"}) + record: ':node_memory_utilisation:' + - expr: sum(node_memory_MemFree_bytes{job="node-exporter"} + node_memory_Cached_bytes{job="node-exporter"} + 
node_memory_Buffers_bytes{job="node-exporter"}) + record: :node_memory_MemFreeCachedBuffers_bytes:sum + - expr: sum(node_memory_MemTotal_bytes{job="node-exporter"}) + record: :node_memory_MemTotal_bytes:sum + - expr: |- + sum by (node) ( + (node_memory_MemFree_bytes{job="node-exporter"} + node_memory_Cached_bytes{job="node-exporter"} + node_memory_Buffers_bytes{job="node-exporter"}) + * on (namespace, pod) group_left(node) + node_namespace_pod:kube_pod_info: + ) + record: node:node_memory_bytes_available:sum + - expr: |- + sum by (node) ( + node_memory_MemTotal_bytes{job="node-exporter"} + * on (namespace, pod) group_left(node) + node_namespace_pod:kube_pod_info: + ) + record: node:node_memory_bytes_total:sum + - expr: |- + (node:node_memory_bytes_total:sum - node:node_memory_bytes_available:sum) + / + node:node_memory_bytes_total:sum + record: node:node_memory_utilisation:ratio + - expr: |- + (node:node_memory_bytes_total:sum - node:node_memory_bytes_available:sum) + / + scalar(sum(node:node_memory_bytes_total:sum)) + record: node:cluster_memory_utilisation:ratio + - expr: |- + 1e3 * sum( + (rate(node_vmstat_pgpgin{job="node-exporter"}[1m]) + + rate(node_vmstat_pgpgout{job="node-exporter"}[1m])) + ) + record: :node_memory_swap_io_bytes:sum_rate + - expr: |- + 1 - + sum by (node) ( + (node_memory_MemFree_bytes{job="node-exporter"} + node_memory_Cached_bytes{job="node-exporter"} + node_memory_Buffers_bytes{job="node-exporter"}) + * on (namespace, pod) group_left(node) + node_namespace_pod:kube_pod_info: + ) + / + sum by (node) ( + node_memory_MemTotal_bytes{job="node-exporter"} + * on (namespace, pod) group_left(node) + node_namespace_pod:kube_pod_info: + ) + record: 'node:node_memory_utilisation:' + - expr: 1 - (node:node_memory_bytes_available:sum / node:node_memory_bytes_total:sum) + record: 'node:node_memory_utilisation_2:' + - expr: |- + 1e3 * sum by (node) ( + (rate(node_vmstat_pgpgin{job="node-exporter"}[1m]) + + 
rate(node_vmstat_pgpgout{job="node-exporter"}[1m])) + * on (namespace, pod) group_left(node) + node_namespace_pod:kube_pod_info: + ) + record: node:node_memory_swap_io_bytes:sum_rate + - expr: avg(irate(node_disk_io_time_seconds_total{job="node-exporter",device=~"nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+"}[1m])) + record: :node_disk_utilisation:avg_irate + - expr: |- + avg by (node) ( + irate(node_disk_io_time_seconds_total{job="node-exporter",device=~"nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+"}[1m]) + * on (namespace, pod) group_left(node) + node_namespace_pod:kube_pod_info: + ) + record: node:node_disk_utilisation:avg_irate + - expr: avg(irate(node_disk_io_time_weighted_seconds_total{job="node-exporter",device=~"nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+"}[1m])) + record: :node_disk_saturation:avg_irate + - expr: |- + avg by (node) ( + irate(node_disk_io_time_weighted_seconds_total{job="node-exporter",device=~"nvme.+|rbd.+|sd.+|vd.+|xvd.+|dm-.+"}[1m]) + * on (namespace, pod) group_left(node) + node_namespace_pod:kube_pod_info: + ) + record: node:node_disk_saturation:avg_irate + - expr: |- + max by (instance, namespace, pod, device) ((node_filesystem_size_bytes{fstype=~"ext[234]|btrfs|xfs|zfs"} + - node_filesystem_avail_bytes{fstype=~"ext[234]|btrfs|xfs|zfs"}) + / node_filesystem_size_bytes{fstype=~"ext[234]|btrfs|xfs|zfs"}) + record: 'node:node_filesystem_usage:' + - expr: max by (instance, namespace, pod, device) (node_filesystem_avail_bytes{fstype=~"ext[234]|btrfs|xfs|zfs"} / node_filesystem_size_bytes{fstype=~"ext[234]|btrfs|xfs|zfs"}) + record: 'node:node_filesystem_avail:' + - expr: |- + sum(irate(node_network_receive_bytes_total{job="node-exporter",device!~"veth.+"}[1m])) + + sum(irate(node_network_transmit_bytes_total{job="node-exporter",device!~"veth.+"}[1m])) + record: :node_net_utilisation:sum_irate + - expr: |- + sum by (node) ( + (irate(node_network_receive_bytes_total{job="node-exporter",device!~"veth.+"}[1m]) + + 
irate(node_network_transmit_bytes_total{job="node-exporter",device!~"veth.+"}[1m])) + * on (namespace, pod) group_left(node) + node_namespace_pod:kube_pod_info: + ) + record: node:node_net_utilisation:sum_irate + - expr: |- + sum(irate(node_network_receive_drop_total{job="node-exporter",device!~"veth.+"}[1m])) + + sum(irate(node_network_transmit_drop_total{job="node-exporter",device!~"veth.+"}[1m])) + record: :node_net_saturation:sum_irate + - expr: |- + sum by (node) ( + (irate(node_network_receive_drop_total{job="node-exporter",device!~"veth.+"}[1m]) + + irate(node_network_transmit_drop_total{job="node-exporter",device!~"veth.+"}[1m])) + * on (namespace, pod) group_left(node) + node_namespace_pod:kube_pod_info: + ) + record: node:node_net_saturation:sum_irate + - expr: |- + max( + max( + kube_pod_info{job="kube-state-metrics", host_ip!=""} + ) by (node, host_ip) + * on (host_ip) group_right (node) + label_replace( + (max(node_filesystem_files{job="node-exporter", mountpoint="/"}) by (instance)), "host_ip", "$1", "instance", "(.*):.*" + ) + ) by (node) + record: 'node:node_inodes_total:' + - expr: |- + max( + max( + kube_pod_info{job="kube-state-metrics", host_ip!=""} + ) by (node, host_ip) + * on (host_ip) group_right (node) + label_replace( + (max(node_filesystem_files_free{job="node-exporter", mountpoint="/"}) by (instance)), "host_ip", "$1", "instance", "(.*):.*" + ) + ) by (node) + record: 'node:node_inodes_free:' +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/prometheus-operator.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/prometheus-operator.yaml new file mode 100755 index 000000000..a8a8915b6 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/prometheus-operator.yaml @@ -0,0 +1,49 @@ +{{- /* +Generated from 'prometheus-operator' group from 
https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.prometheusOperator }} +{{- $operatorJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "operator" }} +{{- $namespace := printf "%s" (include "kube-prometheus-stack.namespace" .) }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "prometheus-operator" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: prometheus-operator + rules: + - alert: PrometheusOperatorReconcileErrors + annotations: + message: Errors while reconciling {{`{{`}} $labels.controller {{`}}`}} in {{`{{`}} $labels.namespace {{`}}`}} Namespace. 
+ expr: rate(prometheus_operator_reconcile_errors_total{job="{{ $operatorJob }}",namespace="{{ $namespace }}"}[5m]) > 0.1 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusOperatorNodeLookupErrors + annotations: + message: Errors while reconciling Prometheus in {{`{{`}} $labels.namespace {{`}}`}} Namespace. + expr: rate(prometheus_operator_node_address_lookup_errors_total{job="{{ $operatorJob }}",namespace="{{ $namespace }}"}[5m]) > 0.1 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/prometheus.rules.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/prometheus.rules.yaml new file mode 100755 index 000000000..0480c83b5 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/rules/prometheus.rules.yaml @@ -0,0 +1,139 @@ +{{- /* +Generated from 'prometheus.rules' group from https://raw.githubusercontent.com/prometheus-operator/kube-prometheus/release-0.1/manifests/prometheus-rules.yaml +Do not change in-place! In order to change this file first read following link: +https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack/hack +*/ -}} +{{- $kubeTargetVersion := default .Capabilities.KubeVersion.GitVersion .Values.kubeTargetVersionOverride }} +{{- if and (semverCompare ">=1.10.0-0" $kubeTargetVersion) (semverCompare "<1.14.0-0" $kubeTargetVersion) .Values.defaultRules.create .Values.defaultRules.rules.prometheus }} +{{- $prometheusJob := printf "%s-%s" (include "kube-prometheus-stack.fullname" .) 
"prometheus" }} +{{- $namespace := printf "%s" (include "kube-prometheus-stack.namespace" .) }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" .) "prometheus.rules" | trunc 63 | trimSuffix "-" }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }} +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.defaultRules.labels }} +{{ toYaml .Values.defaultRules.labels | indent 4 }} +{{- end }} +{{- if .Values.defaultRules.annotations }} + annotations: +{{ toYaml .Values.defaultRules.annotations | indent 4 }} +{{- end }} +spec: + groups: + - name: prometheus.rules + rules: + - alert: PrometheusConfigReloadFailed + annotations: + description: Reloading Prometheus' configuration has failed for {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} + summary: Reloading Prometheus' configuration failed + expr: prometheus_config_last_reload_successful{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"} == 0 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusNotificationQueueRunningFull + annotations: + description: Prometheus' alert notification queue is running full for {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}} $labels.pod{{`}}`}} + summary: Prometheus' alert notification queue is running full + expr: predict_linear(prometheus_notifications_queue_length{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m], 60 * 30) > prometheus_notifications_queue_capacity{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"} + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusErrorSendingAlerts + 
annotations: + description: Errors while sending alerts from Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}} $labels.pod{{`}}`}} to Alertmanager {{`{{`}}$labels.Alertmanager{{`}}`}} + summary: Errors while sending alert from Prometheus + expr: rate(prometheus_notifications_errors_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) / rate(prometheus_notifications_sent_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) > 0.01 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusErrorSendingAlerts + annotations: + description: Errors while sending alerts from Prometheus {{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}} $labels.pod{{`}}`}} to Alertmanager {{`{{`}}$labels.Alertmanager{{`}}`}} + summary: Errors while sending alerts from Prometheus + expr: rate(prometheus_notifications_errors_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) / rate(prometheus_notifications_sent_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) > 0.03 + for: 10m + labels: + severity: critical +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusNotConnectedToAlertmanagers + annotations: + description: Prometheus {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod{{`}}`}} is not connected to any Alertmanagers + summary: Prometheus is not connected to any Alertmanagers + expr: prometheus_notifications_alertmanagers_discovered{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"} < 1 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusTSDBReloadsFailing + annotations: + description: '{{`{{`}}$labels.job{{`}}`}} at 
{{`{{`}}$labels.instance{{`}}`}} had {{`{{`}}$value | humanize{{`}}`}} reload failures over the last four hours.' + summary: Prometheus has issues reloading data blocks from disk + expr: increase(prometheus_tsdb_reloads_failures_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[2h]) > 0 + for: 12h + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusTSDBCompactionsFailing + annotations: + description: '{{`{{`}}$labels.job{{`}}`}} at {{`{{`}}$labels.instance{{`}}`}} had {{`{{`}}$value | humanize{{`}}`}} compaction failures over the last four hours.' + summary: Prometheus has issues compacting sample blocks + expr: increase(prometheus_tsdb_compactions_failed_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[2h]) > 0 + for: 12h + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusTSDBWALCorruptions + annotations: + description: '{{`{{`}}$labels.job{{`}}`}} at {{`{{`}}$labels.instance{{`}}`}} has a corrupted write-ahead log (WAL).' + summary: Prometheus write-ahead log is corrupted + expr: prometheus_tsdb_wal_corruptions_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"} > 0 + for: 4h + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusNotIngestingSamples + annotations: + description: Prometheus {{`{{`}} $labels.namespace {{`}}`}}/{{`{{`}} $labels.pod{{`}}`}} isn't ingesting samples. 
+ summary: Prometheus isn't ingesting samples + expr: rate(prometheus_tsdb_head_samples_appended_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) <= 0 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} + - alert: PrometheusTargetScrapesDuplicate + annotations: + description: '{{`{{`}}$labels.namespace{{`}}`}}/{{`{{`}}$labels.pod{{`}}`}} has many samples rejected due to duplicate timestamps but different values' + summary: Prometheus has many samples rejected + expr: increase(prometheus_target_scrapes_sample_duplicate_timestamp_total{job="{{ $prometheusJob }}",namespace="{{ $namespace }}"}[5m]) > 0 + for: 10m + labels: + severity: warning +{{- if .Values.defaultRules.additionalRuleLabels }} +{{ toYaml .Values.defaultRules.additionalRuleLabels | indent 8 }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/service.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/service.yaml new file mode 100755 index 000000000..8676b81ea --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/service.yaml @@ -0,0 +1,60 @@ +{{- if .Values.prometheus.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-prometheus + self-monitor: {{ .Values.prometheus.serviceMonitor.selfMonitor | quote }} +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} +{{- if .Values.prometheus.service.labels }} +{{ toYaml .Values.prometheus.service.labels | indent 4 }} +{{- end }} +{{- if .Values.prometheus.service.annotations }} + annotations: +{{ toYaml .Values.prometheus.service.annotations | indent 4 }} +{{- end }} +spec: +{{- if .Values.prometheus.service.clusterIP }} + clusterIP: {{ .Values.prometheus.service.clusterIP }} +{{- end }} +{{- if .Values.prometheus.service.externalIPs }} + externalIPs: +{{ toYaml .Values.prometheus.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.prometheus.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.prometheus.service.loadBalancerIP }} +{{- end }} +{{- if .Values.prometheus.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.prometheus.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: {{ .Values.prometheus.prometheusSpec.portName }} + {{- if eq .Values.prometheus.service.type "NodePort" }} + nodePort: {{ .Values.prometheus.service.nodePort }} + {{- end }} + port: {{ .Values.prometheus.service.port }} + targetPort: {{ .Values.prometheus.service.targetPort }} + {{- if .Values.prometheus.thanosIngress.enabled }} + - name: grpc + {{- if eq .Values.prometheus.service.type "NodePort" }} + nodePort: {{ .Values.prometheus.thanosIngress.nodePort }} + {{- end }} + port: {{ .Values.prometheus.thanosIngress.servicePort }} + targetPort: {{ .Values.prometheus.thanosIngress.servicePort }} + {{- end }} +{{- if .Values.prometheus.service.additionalPorts }} +{{ toYaml .Values.prometheus.service.additionalPorts | indent 2 }} +{{- end }} + selector: + app: prometheus + prometheus: {{ template "kube-prometheus-stack.fullname" . 
}}-prometheus +{{- if .Values.prometheus.service.sessionAffinity }} + sessionAffinity: {{ .Values.prometheus.service.sessionAffinity }} +{{- end }} + type: "{{ .Values.prometheus.service.type }}" +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/serviceThanosSidecar.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/serviceThanosSidecar.yaml new file mode 100755 index 000000000..7c33379cb --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/serviceThanosSidecar.yaml @@ -0,0 +1,30 @@ +{{- if and .Values.prometheus.enabled .Values.prometheus.thanosService.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-thanos-discovery + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-thanos-discovery +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.prometheus.thanosService.labels }} +{{ toYaml .Values.prometheus.thanosService.labels | indent 4 }} +{{- end }} +{{- if .Values.prometheus.thanosService.annotations }} + annotations: +{{ toYaml .Values.prometheus.thanosService.annotations | indent 4 }} +{{- end }} +spec: + type: {{ .Values.prometheus.thanosService.type }} + clusterIP: {{ .Values.prometheus.thanosService.clusterIP }} + ports: + - name: {{ .Values.prometheus.thanosService.portName }} + port: {{ .Values.prometheus.thanosService.port }} + targetPort: {{ .Values.prometheus.thanosService.targetPort }} + {{- if eq .Values.prometheus.thanosService.type "NodePort" }} + nodePort: {{ .Values.prometheus.thanosService.nodePort }} + {{- end }} + selector: + app: prometheus + prometheus: {{ template "kube-prometheus-stack.fullname" . 
}}-prometheus +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/serviceaccount.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/serviceaccount.yaml new file mode 100755 index 000000000..862d5f8e4 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.prometheus.enabled .Values.prometheus.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kube-prometheus-stack.prometheus.serviceAccountName" . }} + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-prometheus +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +{{- if .Values.prometheus.serviceAccount.annotations }} + annotations: +{{ toYaml .Values.prometheus.serviceAccount.annotations | indent 4 }} +{{- end }} +imagePullSecrets: +{{ toYaml .Values.global.imagePullSecrets | indent 2 }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/servicemonitor.yaml new file mode 100755 index 000000000..356c013ff --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if and .Values.prometheus.enabled .Values.prometheus.serviceMonitor.selfMonitor }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-prometheus + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-prometheus +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + selector: + matchLabels: + app: {{ template "kube-prometheus-stack.name" . 
}}-prometheus + release: {{ $.Release.Name | quote }} + self-monitor: "true" + namespaceSelector: + matchNames: + - {{ printf "%s" (include "kube-prometheus-stack.namespace" .) | quote }} + endpoints: + - port: {{ .Values.prometheus.prometheusSpec.portName }} + {{- if .Values.prometheus.serviceMonitor.interval }} + interval: {{ .Values.prometheus.serviceMonitor.interval }} + {{- end }} + {{- if .Values.prometheus.serviceMonitor.scheme }} + scheme: {{ .Values.prometheus.serviceMonitor.scheme }} + {{- end }} + {{- if .Values.prometheus.serviceMonitor.tlsConfig }} + tlsConfig: {{ toYaml .Values.prometheus.serviceMonitor.tlsConfig | nindent 6 }} + {{- end }} + {{- if .Values.prometheus.serviceMonitor.bearerTokenFile }} + bearerTokenFile: {{ .Values.prometheus.serviceMonitor.bearerTokenFile }} + {{- end }} + path: "{{ trimSuffix "/" .Values.prometheus.prometheusSpec.routePrefix }}/metrics" +{{- if .Values.prometheus.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.prometheus.serviceMonitor.metricRelabelings | indent 6) . 
}} +{{- end }} +{{- if .Values.prometheus.serviceMonitor.relabelings }} + relabelings: +{{ toYaml .Values.prometheus.serviceMonitor.relabelings | indent 6 }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/servicemonitors.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/servicemonitors.yaml new file mode 100755 index 000000000..a78d1cd00 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/servicemonitors.yaml @@ -0,0 +1,38 @@ +{{- if and .Values.prometheus.enabled .Values.prometheus.additionalServiceMonitors }} +apiVersion: v1 +kind: List +items: +{{- range .Values.prometheus.additionalServiceMonitors }} + - apiVersion: monitoring.coreos.com/v1 + kind: ServiceMonitor + metadata: + name: {{ .name }} + namespace: {{ template "kube-prometheus-stack.namespace" $ }} + labels: + app: {{ template "kube-prometheus-stack.name" $ }}-prometheus +{{ include "kube-prometheus-stack.labels" $ | indent 8 }} + {{- if .additionalLabels }} +{{ toYaml .additionalLabels | indent 8 }} + {{- end }} + spec: + endpoints: +{{ toYaml .endpoints | indent 8 }} + {{- if .jobLabel }} + jobLabel: {{ .jobLabel }} + {{- end }} + {{- if .namespaceSelector }} + namespaceSelector: +{{ toYaml .namespaceSelector | indent 8 }} + {{- end }} + selector: +{{ toYaml .selector | indent 8 }} + {{- if .targetLabels }} + targetLabels: +{{ toYaml .targetLabels | indent 8 }} + {{- end }} + {{- if .podTargetLabels }} + podTargetLabels: +{{ toYaml .podTargetLabels | indent 8 }} + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/serviceperreplica.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/serviceperreplica.yaml new file mode 100755 index 000000000..1a5543362 --- /dev/null +++ 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/prometheus/serviceperreplica.yaml @@ -0,0 +1,46 @@ +{{- if and .Values.prometheus.enabled .Values.prometheus.servicePerReplica.enabled }} +{{- $count := .Values.prometheus.prometheusSpec.replicas | int -}} +{{- $serviceValues := .Values.prometheus.servicePerReplica -}} +apiVersion: v1 +kind: List +metadata: + name: {{ include "kube-prometheus-stack.fullname" $ }}-prometheus-serviceperreplica + namespace: {{ template "kube-prometheus-stack.namespace" . }} +items: +{{- range $i, $e := until $count }} + - apiVersion: v1 + kind: Service + metadata: + name: {{ include "kube-prometheus-stack.fullname" $ }}-prometheus-{{ $i }} + namespace: {{ template "kube-prometheus-stack.namespace" $ }} + labels: + app: {{ include "kube-prometheus-stack.name" $ }}-prometheus +{{ include "kube-prometheus-stack.labels" $ | indent 8 }} + {{- if $serviceValues.annotations }} + annotations: +{{ toYaml $serviceValues.annotations | indent 8 }} + {{- end }} + spec: + {{- if $serviceValues.clusterIP }} + clusterIP: {{ $serviceValues.clusterIP }} + {{- end }} + {{- if $serviceValues.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := $serviceValues.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} + {{- end }} + ports: + - name: {{ $.Values.prometheus.prometheusSpec.portName }} + {{- if eq $serviceValues.type "NodePort" }} + nodePort: {{ $serviceValues.nodePort }} + {{- end }} + port: {{ $serviceValues.port }} + targetPort: {{ $serviceValues.targetPort }} + selector: + app: prometheus + prometheus: {{ include "kube-prometheus-stack.fullname" $ }}-prometheus + statefulset.kubernetes.io/pod-name: prometheus-{{ include "kube-prometheus-stack.fullname" $ }}-prometheus-{{ $i }} + type: "{{ $serviceValues.type }}" +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/clusterrole.yaml 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/clusterrole.yaml new file mode 100755 index 000000000..a115de7ca --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/clusterrole.yaml @@ -0,0 +1,130 @@ +{{- if and .Values.global.rbac.create .Values.global.rbac.userRoles.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: monitoring-admin + labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }} + {{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }} + rbac.authorization.k8s.io/aggregate-to-admin: "true" + {{- end }} +rules: +- apiGroups: + - monitoring.coreos.com + resources: + - alertmanagers + - prometheuses + - prometheuses/finalizers + - alertmanagers/finalizers + verbs: + - 'get' + - 'list' + - 'watch' +- apiGroups: + - monitoring.coreos.com + resources: + - thanosrulers + - thanosrulers/finalizers + - servicemonitors + - podmonitors + - prometheusrules + - podmonitors + - probes + - probes/finalizers + - alertmanagerconfigs + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: monitoring-edit + labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }} + {{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }} + rbac.authorization.k8s.io/aggregate-to-edit: "true" + {{- end }} +rules: +- apiGroups: + - monitoring.coreos.com + resources: + - alertmanagers + - prometheuses + - prometheuses/finalizers + - alertmanagers/finalizers + verbs: + - 'get' + - 'list' + - 'watch' +- apiGroups: + - monitoring.coreos.com + resources: + - thanosrulers + - thanosrulers/finalizers + - servicemonitors + - podmonitors + - prometheusrules + - podmonitors + - probes + - alertmanagerconfigs + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: monitoring-view + labels: {{ include "kube-prometheus-stack.labels" . 
| nindent 4 }} + {{- if .Values.global.rbac.userRoles.aggregateToDefaultRoles }} + rbac.authorization.k8s.io/aggregate-to-view: "true" + {{- end }} +rules: +- apiGroups: + - monitoring.coreos.com + resources: + - alertmanagers + - prometheuses + - prometheuses/finalizers + - alertmanagers/finalizers + - thanosrulers + - thanosrulers/finalizers + - servicemonitors + - podmonitors + - prometheusrules + - podmonitors + - probes + - probes/finalizers + - alertmanagerconfigs + verbs: + - 'get' + - 'list' + - 'watch' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: monitoring-ui-view + labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }} +rules: +- apiGroups: + - "" + resources: + - services/proxy + resourceNames: + - "http:{{ template "kube-prometheus-stack.fullname" . }}-prometheus:{{ .Values.prometheus.service.port }}" + - "https:{{ template "kube-prometheus-stack.fullname" . }}-prometheus:{{ .Values.prometheus.service.port }}" + - "http:{{ template "kube-prometheus-stack.fullname" . }}-alertmanager:{{ .Values.alertmanager.service.port }}" + - "https:{{ template "kube-prometheus-stack.fullname" . }}-alertmanager:{{ .Values.alertmanager.service.port }}" + - "http:{{ include "call-nested" (list . "grafana" "grafana.fullname") }}:{{ .Values.grafana.service.port }}" + - "https:{{ include "call-nested" (list . "grafana" "grafana.fullname") }}:{{ .Values.grafana.service.port }}" + verbs: + - 'get' +- apiGroups: + - "" + resourceNames: + - {{ template "kube-prometheus-stack.fullname" . }}-prometheus + - {{ template "kube-prometheus-stack.fullname" . }}-alertmanager + - {{ include "call-nested" (list . 
"grafana" "grafana.fullname") }} + resources: + - endpoints + verbs: + - list +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/config-role.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/config-role.yaml new file mode 100755 index 000000000..f48ffc827 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/config-role.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.global.rbac.create .Values.global.rbac.userRoles.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: monitoring-config-admin + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }} +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: monitoring-config-edit + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }} +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: monitoring-config-view + namespace: {{ template "kube-prometheus-stack.namespace" . }} + labels: {{ include "kube-prometheus-stack.labels" . 
| nindent 4 }} +rules: +- apiGroups: + - "" + resources: + - configmaps + - secrets + verbs: + - 'get' + - 'list' + - 'watch' +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboard-role.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboard-role.yaml new file mode 100755 index 000000000..d2f81976a --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboard-role.yaml @@ -0,0 +1,47 @@ +{{- if and .Values.global.rbac.create .Values.global.rbac.userRoles.create .Values.grafana.enabled }} +{{- if .Values.grafana.defaultDashboardsEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: monitoring-dashboard-admin + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }} +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: monitoring-dashboard-edit + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + labels: {{ include "kube-prometheus-stack.labels" . | nindent 4 }} +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: monitoring-dashboard-view + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + labels: {{ include "kube-prometheus-stack.labels" . 
| nindent 4 }} +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - 'get' + - 'list' + - 'watch' +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/addons/ingress-nginx-dashboard.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/addons/ingress-nginx-dashboard.yaml new file mode 100755 index 000000000..20c57dd2a --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/addons/ingress-nginx-dashboard.yaml @@ -0,0 +1,18 @@ +# Source: +{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled .Values.ingressNginx.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: {{ printf "%s-%s" (include "kube-prometheus-stack.fullname" $) "ingress-nginx" | trunc 63 | trimSuffix "-" }} + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: +{{ (.Files.Glob "files/ingress-nginx/*").AsConfig | indent 2 }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/cluster-dashboards.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/cluster-dashboards.yaml new file mode 100755 index 000000000..d73b25745 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/cluster-dashboards.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + 
namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: rancher-default-dashboards-cluster + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: +{{ (.Files.Glob "files/rancher/cluster/*").AsConfig | indent 2 }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/default-dashboard.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/default-dashboard.yaml new file mode 100755 index 000000000..8865efa93 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/default-dashboard.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: rancher-default-dashboards-home + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: +{{ (.Files.Glob "files/rancher/home/*").AsConfig | indent 2 }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/k8s-dashboards.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/k8s-dashboards.yaml new file mode 100755 index 000000000..37afc6495 --- /dev/null +++ 
b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/k8s-dashboards.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: rancher-default-dashboards-k8s + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: +{{ (.Files.Glob "files/rancher/k8s/*").AsConfig | indent 2 }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/nodes-dashboards.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/nodes-dashboards.yaml new file mode 100755 index 000000000..172c36e9d --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/nodes-dashboards.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: rancher-default-dashboards-nodes + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: +{{ (.Files.Glob "files/rancher/nodes/*").AsConfig | indent 2 }} +{{- end }} diff --git 
a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/pods-dashboards.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/pods-dashboards.yaml new file mode 100755 index 000000000..940f18869 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/pods-dashboards.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: rancher-default-dashboards-pods + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ $.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: +{{ (.Files.Glob "files/rancher/pods/*").AsConfig | indent 2 }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/workload-dashboards.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/workload-dashboards.yaml new file mode 100755 index 000000000..d146dacdd --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/dashboards/rancher/workload-dashboards.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + namespace: {{ .Values.grafana.defaultDashboards.namespace }} + name: rancher-default-dashboards-workloads + annotations: +{{ toYaml .Values.grafana.sidecar.dashboards.annotations | indent 4 }} + labels: + {{- if $.Values.grafana.sidecar.dashboards.label }} + {{ 
$.Values.grafana.sidecar.dashboards.label }}: "1" + {{- end }} + app: {{ template "kube-prometheus-stack.name" $ }}-grafana +{{ include "kube-prometheus-stack.labels" $ | indent 4 }} +data: +{{ (.Files.Glob "files/rancher/workloads/*").AsConfig | indent 2 }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/exporters/ingress-nginx/service.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/exporters/ingress-nginx/service.yaml new file mode 100755 index 000000000..d256576ad --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/exporters/ingress-nginx/service.yaml @@ -0,0 +1,24 @@ +{{- if .Values.ingressNginx.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-ingress-nginx + labels: + app: {{ template "kube-prometheus-stack.name" . }}-ingress-nginx + jobLabel: ingress-nginx +{{ include "kube-prometheus-stack.labels" . 
| indent 4 }} + namespace: {{ .Values.ingressNginx.namespace }} +spec: + clusterIP: None + ports: + - name: http-metrics + port: {{ .Values.ingressNginx.service.port }} + protocol: TCP + targetPort: {{ .Values.ingressNginx.service.targetPort }} + selector: + {{- if .Values.ingressNginx.service.selector }} +{{ toYaml .Values.ingressNginx.service.selector | indent 4 }} + {{- else }} + app: ingress-nginx + {{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/exporters/ingress-nginx/servicemonitor.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/exporters/ingress-nginx/servicemonitor.yaml new file mode 100755 index 000000000..643778772 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/exporters/ingress-nginx/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if .Values.ingressNginx.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kube-prometheus-stack.fullname" . }}-ingress-nginx + namespace: {{ .Values.ingressNginx.namespace }} + labels: + app: {{ template "kube-prometheus-stack.name" . }}-ingress-nginx +{{ include "kube-prometheus-stack.labels" . | indent 4 }} +spec: + jobLabel: jobLabel + selector: + matchLabels: + app: {{ template "kube-prometheus-stack.name" . }}-ingress-nginx + release: {{ $.Release.Name | quote }} + namespaceSelector: + matchNames: + - {{ .Values.ingressNginx.namespace }} + endpoints: + - port: http-metrics + {{- if .Values.ingressNginx.serviceMonitor.interval}} + interval: {{ .Values.ingressNginx.serviceMonitor.interval }} + {{- end }} + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token +{{- if .Values.ingressNginx.serviceMonitor.metricRelabelings }} + metricRelabelings: +{{ tpl (toYaml .Values.ingressNginx.serviceMonitor.metricRelabelings | indent 4) . 
}} +{{- end }} +{{- if .Values.ingressNginx.serviceMonitor.relabelings }} + relabelings: +{{ toYaml .Values.ingressNginx.serviceMonitor.relabelings | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/hardened.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/hardened.yaml new file mode 100755 index 000000000..f9bf57c7e --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/rancher-monitoring/hardened.yaml @@ -0,0 +1,124 @@ +{{- $namespaces := dict "_0" .Release.Namespace -}} +{{- if and .Values.grafana.enabled .Values.grafana.defaultDashboardsEnabled (not .Values.grafana.defaultDashboards.useExistingNamespace) -}} +{{- $_ := set $namespaces "_1" .Values.grafana.defaultDashboards.namespace -}} +{{- end -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Chart.Name }}-patch-sa + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Chart.Name }}-patch-sa + annotations: + "helm.sh/hook": post-install, post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation +spec: + template: + metadata: + name: {{ .Chart.Name }}-patch-sa + labels: + app: {{ .Chart.Name }}-patch-sa + spec: + serviceAccountName: {{ .Chart.Name }}-patch-sa + securityContext: + runAsNonRoot: true + runAsUser: 1000 + restartPolicy: Never + nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} + tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} + containers: + {{- range $_, $ns := $namespaces }} + - name: patch-sa-{{ $ns }} + image: {{ template "system_default_registry" $ }}{{ $.Values.global.kubectl.repository }}:{{ $.Values.global.kubectl.tag }} + imagePullPolicy: {{ $.Values.global.kubectl.pullPolicy }} + command: ["kubectl", "patch", "serviceaccount", "default", "-p", "{\"automountServiceAccountToken\": false}"] + args: ["-n", "{{ $ns }}"] + {{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Chart.Name }}-patch-sa + labels: + app: {{ .Chart.Name }}-patch-sa +rules: +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: ['get', 'patch'] +- apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ .Chart.Name }}-patch-sa +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Chart.Name }}-patch-sa + labels: + app: {{ .Chart.Name }}-patch-sa +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Chart.Name }}-patch-sa +subjects: +- kind: ServiceAccount + name: {{ .Chart.Name }}-patch-sa + namespace: {{ .Release.Namespace }} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Chart.Name }}-patch-sa + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Chart.Name }}-patch-sa +--- +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ .Chart.Name }}-patch-sa + namespace: {{ .Release.Namespace }} + labels: + app: {{ .Chart.Name }}-patch-sa +spec: + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + volumes: + - 'secret' +{{- range $_, $ns := $namespaces }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + 
name: default-allow-all + namespace: {{ $ns }} +spec: + podSelector: {} + ingress: + - {} + egress: + - {} + policyTypes: + - Ingress + - Egress +{{- end }} diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/validate-install-crd.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/validate-install-crd.yaml new file mode 100755 index 000000000..ac7921f58 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/templates/validate-install-crd.yaml @@ -0,0 +1,21 @@ +#{{- if gt (len (lookup "rbac.authorization.k8s.io/v1" "ClusterRole" "" "")) 0 -}} +# {{- $found := dict -}} +# {{- set $found "monitoring.coreos.com/v1alpha1/AlertmanagerConfig" false -}} +# {{- set $found "monitoring.coreos.com/v1/Alertmanager" false -}} +# {{- set $found "monitoring.coreos.com/v1/PodMonitor" false -}} +# {{- set $found "monitoring.coreos.com/v1/Probe" false -}} +# {{- set $found "monitoring.coreos.com/v1/Prometheus" false -}} +# {{- set $found "monitoring.coreos.com/v1/PrometheusRule" false -}} +# {{- set $found "monitoring.coreos.com/v1/ServiceMonitor" false -}} +# {{- set $found "monitoring.coreos.com/v1/ThanosRuler" false -}} +# {{- range .Capabilities.APIVersions -}} +# {{- if hasKey $found (toString .) -}} +# {{- set $found (toString .) true -}} +# {{- end -}} +# {{- end -}} +# {{- range $_, $exists := $found -}} +# {{- if (eq $exists false) -}} +# {{- required "Required CRDs are missing. Please install the corresponding CRD chart before installing this chart." "" -}} +# {{- end -}} +# {{- end -}} +#{{- end -}} \ No newline at end of file diff --git a/charts/rancher-monitoring/rancher-monitoring/14.5.100/values.yaml b/charts/rancher-monitoring/rancher-monitoring/14.5.100/values.yaml new file mode 100755 index 000000000..00df81c60 --- /dev/null +++ b/charts/rancher-monitoring/rancher-monitoring/14.5.100/values.yaml @@ -0,0 +1,2956 @@ +# Default values for kube-prometheus-stack. +# This is a YAML-formatted file. 
+# Declare variables to be passed into your templates. + +# Rancher Monitoring Configuration + +## Configuration for prometheus-adapter +## ref: https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-adapter +## +prometheus-adapter: + enabled: true + prometheus: + # Change this if you change the namespaceOverride or nameOverride of prometheus-operator + url: http://rancher-monitoring-prometheus.cattle-monitoring-system.svc + port: 9090 + psp: + create: true + +## RKE PushProx Monitoring +## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox +## +rkeControllerManager: + enabled: false + metricsPort: 10252 + component: kube-controller-manager + clients: + port: 10011 + useLocalhost: true + nodeSelector: + node-role.kubernetes.io/controlplane: "true" + tolerations: + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + +rkeScheduler: + enabled: false + metricsPort: 10251 + component: kube-scheduler + clients: + port: 10012 + useLocalhost: true + nodeSelector: + node-role.kubernetes.io/controlplane: "true" + tolerations: + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + +rkeProxy: + enabled: false + metricsPort: 10249 + component: kube-proxy + clients: + port: 10013 + useLocalhost: true + tolerations: + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + +rkeEtcd: + enabled: false + metricsPort: 2379 + component: kube-etcd + clients: + port: 10014 + https: + enabled: true + certDir: /etc/kubernetes/ssl + certFile: kube-etcd-*.pem + keyFile: kube-etcd-*-key.pem + caCertFile: kube-ca.pem + nodeSelector: + node-role.kubernetes.io/etcd: "true" + tolerations: + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + +## k3s PushProx Monitoring +## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox +## +k3sServer: + enabled: 
false + metricsPort: 10249 + component: k3s-server + clients: + port: 10013 + useLocalhost: true + tolerations: + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + +## KubeADM PushProx Monitoring +## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox +## +kubeAdmControllerManager: + enabled: false + metricsPort: 10257 + component: kube-controller-manager + clients: + port: 10011 + useLocalhost: true + https: + enabled: true + useServiceAccountCredentials: true + insecureSkipVerify: true + nodeSelector: + node-role.kubernetes.io/master: "" + tolerations: + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + +kubeAdmScheduler: + enabled: false + metricsPort: 10259 + component: kube-scheduler + clients: + port: 10012 + useLocalhost: true + https: + enabled: true + useServiceAccountCredentials: true + insecureSkipVerify: true + nodeSelector: + node-role.kubernetes.io/master: "" + tolerations: + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + +kubeAdmProxy: + enabled: false + metricsPort: 10249 + component: kube-proxy + clients: + port: 10013 + useLocalhost: true + tolerations: + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + +kubeAdmEtcd: + enabled: false + metricsPort: 2381 + component: kube-etcd + clients: + port: 10014 + useLocalhost: true + nodeSelector: + node-role.kubernetes.io/master: "" + tolerations: + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + +## rke2 PushProx Monitoring +## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-pushprox +## +rke2ControllerManager: + enabled: false + metricsPort: 10252 + component: kube-controller-manager + clients: + port: 10011 + useLocalhost: true + nodeSelector: + node-role.kubernetes.io/master: "true" + tolerations: + - effect: "NoExecute" + operator: 
"Exists" + - effect: "NoSchedule" + operator: "Exists" + +rke2Scheduler: + enabled: false + metricsPort: 10251 + component: kube-scheduler + clients: + port: 10012 + useLocalhost: true + nodeSelector: + node-role.kubernetes.io/master: "true" + tolerations: + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + +rke2Proxy: + enabled: false + metricsPort: 10249 + component: kube-proxy + clients: + port: 10013 + useLocalhost: true + tolerations: + - effect: "NoExecute" + operator: "Exists" + - effect: "NoSchedule" + operator: "Exists" + +rke2Etcd: + enabled: false + metricsPort: 2381 + component: kube-etcd + clients: + port: 10014 + useLocalhost: true + nodeSelector: + node-role.kubernetes.io/etcd: "true" + tolerations: + - effect: "NoSchedule" + key: node-role.kubernetes.io/master + operator: "Equal" + +## Component scraping nginx-ingress-controller +## +ingressNginx: + enabled: false + + ## The namespace to search for your nginx-ingress-controller + ## + namespace: ingress-nginx + + service: + port: 9913 + targetPort: 10254 + # selector: + # app: ingress-nginx + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. 
+ ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +# Prometheus Operator Configuration + +## Provide a name in place of kube-prometheus-stack for `app:` labels +## NOTE: If you change this value, you must update the prometheus-adapter.prometheus.url +## +nameOverride: "rancher-monitoring" + +## Override the deployment namespace +## NOTE: If you change this value, you must update the prometheus-adapter.prometheus.url +## +namespaceOverride: "cattle-monitoring-system" + +## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.16.6 +## +kubeTargetVersionOverride: "" + +## Provide a name to substitute for the full names of resources +## +fullnameOverride: "" + +## Labels to apply to all resources +## +commonLabels: {} +# scmhash: abc123 +# myLabel: aakkmd + +## Create default rules for monitoring the cluster +## +defaultRules: + create: true + rules: + alertmanager: true + etcd: true + general: true + k8s: true + kubeApiserver: true + kubeApiserverAvailability: true + kubeApiserverError: true + kubeApiserverSlos: true + kubelet: true + kubePrometheusGeneral: true + kubePrometheusNodeAlerting: true + kubePrometheusNodeRecording: true + kubernetesAbsent: true + kubernetesApps: true + kubernetesResources: true + kubernetesStorage: true + kubernetesSystem: true + kubeScheduler: true + kubeStateMetrics: true + network: true + node: true + prometheus: true + prometheusOperator: true + time: true + + ## Runbook url prefix for default rules + runbookUrl: https://github.com/kubernetes-monitoring/kubernetes-mixin/tree/master/runbook.md# + ## Reduce app namespace alert scope + appNamespacesTarget: ".*" + + ## Labels for default rules + labels: {} + ## Annotations for default rules + annotations: {} + + ## Additional labels for PrometheusRule alerts + additionalRuleLabels: {} + +## Deprecated way to provide custom 
recording or alerting rules to be deployed into the cluster. +## +# additionalPrometheusRules: [] +# - name: my-rule-file +# groups: +# - name: my_group +# rules: +# - record: my_record +# expr: 100 * my_record + +## Provide custom recording or alerting rules to be deployed into the cluster. +## +additionalPrometheusRulesMap: {} +# rule-name: +# groups: +# - name: my_group +# rules: +# - record: my_record +# expr: 100 * my_record + +## +global: + cattle: + systemDefaultRegistry: "" + ## Windows Monitoring + ## ref: https://github.com/rancher/charts/tree/dev-v2.5-source/packages/rancher-windows-exporter + ## + ## Deploys a DaemonSet of Prometheus exporters based on https://github.com/prometheus-community/windows_exporter. + ## Every Windows host must have a wins version of 0.1.0+ to use this chart (default as of Rancher 2.5.8). + ## To upgrade wins versions on Windows hosts, see https://github.com/rancher/wins/tree/master/charts/rancher-wins-upgrader. + ## + windows: + enabled: false + kubectl: + repository: rancher/kubectl + tag: v1.20.2 + pullPolicy: IfNotPresent + rbac: + ## Create RBAC resources for ServiceAccounts and users + ## + create: true + + userRoles: + ## Create default user ClusterRoles to allow users to interact with Prometheus CRs, ConfigMaps, and Secrets + create: true + ## Aggregate default user ClusterRoles into default k8s ClusterRoles + aggregateToDefaultRoles: true + + pspEnabled: true + pspAnnotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Reference to one or more secrets to be 
used when pulling images + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + imagePullSecrets: [] + # - name: "image-pull-secret" + +## Configuration for alertmanager +## ref: https://prometheus.io/docs/alerting/alertmanager/ +## +alertmanager: + + ## Deploy alertmanager + ## + enabled: true + + ## Api that prometheus will use to communicate with alertmanager. Possible values are v1, v2 + ## + apiVersion: v2 + + ## Service account for Alertmanager to use. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + create: true + name: "" + annotations: {} + + ## Configure pod disruption budgets for Alertmanager + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + ## This configuration is immutable once created and will require the PDB to be deleted to be changed + ## https://github.com/kubernetes/kubernetes/issues/45398 + ## + podDisruptionBudget: + enabled: false + minAvailable: 1 + maxUnavailable: "" + + ## Alertmanager configuration directives + ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file + ## https://prometheus.io/webtools/alerting/routing-tree-editor/ + ## + ## Example Slack Config + ## config: + ## route: + ## group_by: ['job'] + ## group_wait: 30s + ## group_interval: 5m + ## repeat_interval: 3h + ## receiver: 'slack-notifications' + ## receivers: + ## - name: 'slack-notifications' + ## slack_configs: + ## - send_resolved: true + ## text: '{{ template "slack.rancher.text" . 
}}' + ## api_url: + ## templates: + ## - /etc/alertmanager/config/*.tmpl + config: + global: + resolve_timeout: 5m + route: + group_by: ['job'] + group_wait: 30s + group_interval: 5m + repeat_interval: 12h + receiver: 'null' + routes: + - match: + alertname: Watchdog + receiver: 'null' + receivers: + - name: 'null' + templates: + - '/etc/alertmanager/config/*.tmpl' + + ## Pass the Alertmanager configuration directives through Helm's templating + ## engine. If the Alertmanager configuration contains Alertmanager templates, + ## they'll need to be properly escaped so that they are not interpreted by + ## Helm + ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function + ## https://prometheus.io/docs/alerting/configuration/#tmpl_string + ## https://prometheus.io/docs/alerting/notifications/ + ## https://prometheus.io/docs/alerting/notification_examples/ + tplConfig: false + + ## Alertmanager template files to format alerts + ## By default, templateFiles are placed in /etc/alertmanager/config/ and if + ## they have a .tmpl file suffix will be loaded. See config.templates above + ## to change, add other suffixes. If adding other suffixes, be sure to update + ## config.templates above to include those suffixes. + ## ref: https://prometheus.io/docs/alerting/notifications/ + ## https://prometheus.io/docs/alerting/notification_examples/ + ## + templateFiles: + rancher_defaults.tmpl: |- + {{- define "slack.rancher.text" -}} + {{ template "rancher.text_multiple" . }} + {{- end -}} + + {{- define "rancher.text_multiple" -}} + *[GROUP - Details]* + One or more alarms in this group have triggered a notification. + + {{- if gt (len .GroupLabels.Values) 0 }} + *Group Labels:* + {{- range .GroupLabels.SortedPairs }} + • *{{ .Name }}:* `{{ .Value }}` + {{- end }} + {{- end }} + {{- if .ExternalURL }} + *Link to AlertManager:* {{ .ExternalURL }} + {{- end }} + + {{- range .Alerts }} + {{ template "rancher.text_single" . 
}} + {{- end }} + {{- end -}} + + {{- define "rancher.text_single" -}} + {{- if .Labels.alertname }} + *[ALERT - {{ .Labels.alertname }}]* + {{- else }} + *[ALERT]* + {{- end }} + {{- if .Labels.severity }} + *Severity:* `{{ .Labels.severity }}` + {{- end }} + {{- if .Labels.cluster }} + *Cluster:* {{ .Labels.cluster }} + {{- end }} + {{- if .Annotations.summary }} + *Summary:* {{ .Annotations.summary }} + {{- end }} + {{- if .Annotations.message }} + *Message:* {{ .Annotations.message }} + {{- end }} + {{- if .Annotations.description }} + *Description:* {{ .Annotations.description }} + {{- end }} + {{- if .Annotations.runbook_url }} + *Runbook URL:* <{{ .Annotations.runbook_url }}|:spiral_note_pad:> + {{- end }} + {{- with .Labels }} + {{- with .Remove (stringSlice "alertname" "severity" "cluster") }} + {{- if gt (len .) 0 }} + *Additional Labels:* + {{- range .SortedPairs }} + • *{{ .Name }}:* `{{ .Value }}` + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- with .Annotations }} + {{- with .Remove (stringSlice "summary" "message" "description" "runbook_url") }} + {{- if gt (len .) 0 }} + *Additional Annotations:* + {{- range .SortedPairs }} + • *{{ .Name }}:* `{{ .Value }}` + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- end -}} + + ingress: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + + labels: {} + + ## Hosts must be provided if Ingress is enabled. 
+ ## + hosts: [] + # - alertmanager.domain.com + + ## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## TLS configuration for Alertmanager Ingress + ## Secret must be manually created in the namespace + ## + tls: [] + # - secretName: alertmanager-general-tls + # hosts: + # - alertmanager.example.com + + ## Configuration for Alertmanager secret + ## + secret: + + # Should the Alertmanager Config Secret be cleaned up on an uninstall? + # This is set to false by default to prevent the loss of alerting configuration on an uninstall + # Only used Alertmanager is deployed and alertmanager.alertmanagerSpec.useExistingSecret=false + # + cleanupOnUninstall: false + + # The image used to manage the Alertmanager Config Secret's lifecycle + # Only used Alertmanager is deployed and alertmanager.alertmanagerSpec.useExistingSecret=false + # + image: + repository: rancher/rancher-agent + tag: v2.5.7 + pullPolicy: IfNotPresent + + securityContext: + runAsNonRoot: true + runAsUser: 1000 + + annotations: {} + + ## Configuration for creating an Ingress that will map to each Alertmanager replica service + ## alertmanager.servicePerReplica must be enabled + ## + ingressPerReplica: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + labels: {} + + ## Final form of the hostname for each per replica ingress is + ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ 
ingressPerReplica.hostDomain }} + ## + ## Prefix for the per replica ingress that will have `-$replicaNumber` + ## appended to the end + hostPrefix: "" + ## Domain that will be used for the per replica ingress + hostDomain: "" + + ## Paths to use for ingress rules + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## Secret name containing the TLS certificate for alertmanager per replica ingress + ## Secret must be manually created in the namespace + tlsSecretName: "" + + ## Separated secret for each per replica Ingress. Can be used together with cert-manager + ## + tlsSecretPerReplica: + enabled: false + ## Final form of the secret for each per replica ingress is + ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }} + ## + prefix: "alertmanager" + + ## Configuration for Alertmanager service + ## + service: + annotations: {} + labels: {} + clusterIP: "" + + ## Port for Alertmanager Service to listen on + ## + port: 9093 + ## To be used with a proxy extraContainer port + ## + targetPort: 9093 + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30903 + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + + ## Additional ports to open for Alertmanager service + additionalPorts: [] + + externalIPs: [] + loadBalancerIP: "" + loadBalancerSourceRanges: [] + ## Service type + ## + type: ClusterIP + + ## Configuration for creating a separate Service for each statefulset Alertmanager replica + ## + servicePerReplica: + enabled: false + annotations: {} + + ## Port for Alertmanager Service per replica to listen on + ## + port: 9093 + + ## To be used with a proxy 
extraContainer port + targetPort: 9093 + + ## Port to expose on each node + ## Only used if servicePerReplica.type is 'NodePort' + ## + nodePort: 30904 + + ## Loadbalancer source IP ranges + ## Only used if servicePerReplica.type is "LoadBalancer" + loadBalancerSourceRanges: [] + ## Service type + ## + type: ClusterIP + + ## If true, create a serviceMonitor for alertmanager + ## + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + selfMonitor: true + + ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. + scheme: "" + + ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. + ## Of type: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#tlsconfig + tlsConfig: {} + + bearerTokenFile: "" + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Settings affecting alertmanagerSpec + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#alertmanagerspec + ## + alertmanagerSpec: + ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata + ## Metadata Labels and Annotations gets propagated to the Alertmanager pods. 
+ ## + podMetadata: {} + + ## Image of Alertmanager + ## + image: + repository: rancher/mirrored-prom-alertmanager + tag: v0.21.0 + sha: "" + + ## If true then the user will be responsible to provide a secret with alertmanager configuration + ## So when true the config part will be ignored (including templateFiles) and the one in the secret will be used + ## + useExistingSecret: false + + ## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the + ## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/. + ## + secrets: [] + + ## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods. + ## The ConfigMaps are mounted into /etc/alertmanager/configmaps/. + ## + configMaps: [] + + ## ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for + ## this Alertmanager instance. Defaults to 'alertmanager-' The secret is mounted into /etc/alertmanager/config. + ## + # configSecret: + + ## AlertmanagerConfigs to be selected to merge and configure Alertmanager with. + ## + alertmanagerConfigSelector: {} + ## Example which selects all alertmanagerConfig resources + ## with label "alertconfig" with values any of "example-config" or "example-config-2" + # alertmanagerConfigSelector: + # matchExpressions: + # - key: alertconfig + # operator: In + # values: + # - example-config + # - example-config-2 + # + ## Example which selects all alertmanagerConfig resources with label "role" set to "example-config" + # alertmanagerConfigSelector: + # matchLabels: + # role: example-config + + ## Namespaces to be selected for AlertmanagerConfig discovery. If nil, only check own namespace. 
+ ## + alertmanagerConfigNamespaceSelector: {} + ## Example which selects all namespaces + ## with label "alertmanagerconfig" with values any of "example-namespace" or "example-namespace-2" + # alertmanagerConfigNamespaceSelector: + # matchExpressions: + # - key: alertmanagerconfig + # operator: In + # values: + # - example-namespace + # - example-namespace-2 + + ## Example which selects all namespaces with label "alertmanagerconfig" set to "enabled" + # alertmanagerConfigNamespaceSelector: + # matchLabels: + # alertmanagerconfig: enabled + + ## Define Log Format + # Use logfmt (default) or json logging + logFormat: logfmt + + ## Log level for Alertmanager to be configured with. + ## + logLevel: info + + ## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the + ## running cluster equal to the expected size. + replicas: 1 + + ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression + ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours). + ## + retention: 120h + + ## Storage is the definition of how storage will be used by the Alertmanager instances. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md + ## + storage: {} + # volumeClaimTemplate: + # spec: + # storageClassName: gluster + # accessModes: ["ReadWriteOnce"] + # resources: + # requests: + # storage: 50Gi + # selector: {} + + + ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name. string false + ## + externalUrl: + + ## The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true, + ## but the server serves requests under a different route prefix. 
For example for use with kubectl proxy. + ## + routePrefix: / + + ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions. + ## + paused: false + + ## Define which Nodes the Pods are scheduled on. + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Define resources requests and limits for single Pods. + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + limits: + memory: 500Mi + cpu: 1000m + requests: + memory: 100Mi + cpu: 100m + + ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node. + ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. + ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. + ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. + ## + podAntiAffinity: "" + + ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity. + ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone + ## + podAntiAffinityTopologyKey: kubernetes.io/hostname + + ## Assign custom affinity rules to the alertmanager instance + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/e2e-az-name + # operator: In + # values: + # - e2e-az1 + # - e2e-az2 + + ## If specified, the pod's tolerations. + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + + ## If specified, the pod's topology spread constraints. 
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app: alertmanager + + ## SecurityContext holds pod-level security attributes and common container settings. + ## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + + ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP. + ## Note this is only for the Alertmanager UI, not the gossip communication. + ## + listenLocal: false + + ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod. + ## + containers: [] + + # Additional volumes on the output StatefulSet definition. + volumes: [] + + # Additional VolumeMounts on the output StatefulSet definition. + volumeMounts: [] + + ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes + ## (permissions, dir tree) on mounted volumes before starting prometheus + initContainers: [] + + ## Priority class assigned to the Pods + ## + priorityClassName: "" + + ## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. + ## + additionalPeers: [] + + ## PortName to use for Alert Manager. + ## + portName: "web" + + ## ClusterAdvertiseAddress is the explicit address to advertise in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. 
[1] RFC1918: https://tools.ietf.org/html/rfc1918 + ## + clusterAdvertiseAddress: false + + ## ForceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica. + ## Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each. + forceEnableClusterMode: false + + +## Using default values from https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml +## +grafana: + enabled: true + namespaceOverride: "" + + ## Grafana's primary configuration + ## NOTE: values in map will be converted to ini format + ## ref: http://docs.grafana.org/installation/configuration/ + ## + grafana.ini: + users: + auto_assign_org_role: Viewer + auth: + disable_login_form: false + auth.anonymous: + enabled: true + org_role: Viewer + auth.basic: + enabled: false + dashboards: + # Modify this value to change the default dashboard shown on the main Grafana page + default_home_dashboard_path: /tmp/dashboards/rancher-default-home.json + security: + # Required to embed dashboards in Rancher Cluster Overview Dashboard on Cluster Explorer + allow_embedding: true + + deploymentStrategy: + type: Recreate + + ## Deploy default dashboards. 
+ ## + defaultDashboardsEnabled: true + + # Additional options for defaultDashboards + defaultDashboards: + # The default namespace to place defaultDashboards within + namespace: cattle-dashboards + # Whether to create the default namespace as a Helm managed namespace or use an existing namespace + # If false, the defaultDashboards.namespace will be created as a Helm managed namespace + useExistingNamespace: false + # Whether the Helm managed namespace created by this chart should be left behind on a Helm uninstall + # If you place other dashboards in this namespace, then they will be deleted on a helm uninstall + # Ignore if useExistingNamespace is true + cleanupOnUninstall: false + + adminPassword: prom-operator + + ingress: + ## If true, Grafana Ingress will be created + ## + enabled: false + + ## Annotations for Grafana Ingress + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + + ## Labels to be added to the Ingress + ## + labels: {} + + ## Hostnames. + ## Must be provided if Ingress is enable. 
+ ## + # hosts: + # - grafana.domain.com + hosts: [] + + ## Path for grafana ingress + path: / + + ## TLS configuration for grafana Ingress + ## Secret must be manually created in the namespace + ## + tls: [] + # - secretName: grafana-general-tls + # hosts: + # - grafana.example.com + + sidecar: + dashboards: + enabled: true + label: grafana_dashboard + searchNamespace: cattle-dashboards + + ## Annotations for Grafana dashboard configmaps + ## + annotations: {} + multicluster: false + datasources: + enabled: true + defaultDatasourceEnabled: true + + # If not defined, will use prometheus.prometheusSpec.scrapeInterval or its default + # defaultDatasourceScrapeInterval: 15s + + ## Annotations for Grafana datasource configmaps + ## + annotations: {} + + ## Create datasource for each Pod of Prometheus StatefulSet; + ## this uses headless service `prometheus-operated` which is + ## created by Prometheus Operator + ## ref: https://git.io/fjaBS + createPrometheusReplicasDatasources: false + label: grafana_datasource + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /etc/grafana/ssl/ + # configMap: certs-configmap + # readOnly: true + + ## Configure additional grafana datasources (passed through tpl) + ## ref: http://docs.grafana.org/administration/provisioning/#datasources + additionalDataSources: [] + # - name: prometheus-sample + # access: proxy + # basicAuth: true + # basicAuthPassword: pass + # basicAuthUser: daco + # editable: false + # jsonData: + # tlsSkipVerify: true + # orgId: 1 + # type: prometheus + # url: https://{{ printf "%s-prometheus.svc" .Release.Name }}:9090 + # version: 1 + + ## Passed to grafana subchart and used by servicemonitor below + ## + service: + portName: nginx-http + ## Port for Grafana Service to listen on + ## + port: 80 + ## To be used with a proxy extraContainer port + ## + targetPort: 8080 + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30950 + ## Service type + ## + 
type: ClusterIP + + proxy: + image: + repository: rancher/mirrored-library-nginx + tag: 1.19.2-alpine + + ## Enable an Specify container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod + extraContainers: | + - name: grafana-proxy + args: + - nginx + - -g + - daemon off; + - -c + - /nginx/nginx.conf + image: "{{ template "system_default_registry" . }}{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }}" + ports: + - containerPort: 8080 + name: nginx-http + protocol: TCP + volumeMounts: + - mountPath: /nginx + name: grafana-nginx + - mountPath: /var/cache/nginx + name: nginx-home + securityContext: + runAsUser: 101 + runAsGroup: 101 + + ## Volumes that can be used in containers + extraContainerVolumes: + - name: nginx-home + emptyDir: {} + - name: grafana-nginx + configMap: + name: grafana-nginx-proxy-config + items: + - key: nginx.conf + mode: 438 + path: nginx.conf + + ## If true, create a serviceMonitor for grafana + ## + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + selfMonitor: true + + # Path to use for scraping metrics. Might be different if server.root_url is set + # in grafana.ini + path: "/metrics" + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. 
+ ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + resources: + limits: + memory: 200Mi + cpu: 200m + requests: + memory: 100Mi + cpu: 100m + +## Component scraping the kube api server +## +kubeApiServer: + enabled: true + tlsConfig: + serverName: kubernetes + insecureSkipVerify: false + + ## If your API endpoint address is not reachable (as in AKS) you can replace it with the kubernetes service + ## + relabelings: [] + # - sourceLabels: + # - __meta_kubernetes_namespace + # - __meta_kubernetes_service_name + # - __meta_kubernetes_endpoint_port_name + # action: keep + # regex: default;kubernetes;https + # - targetLabel: __address__ + # replacement: kubernetes.default.svc:443 + + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + jobLabel: component + selector: + matchLabels: + component: apiserver + provider: kubernetes + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + +## Component scraping the kubelet and kubelet-hosted cAdvisor +## +kubelet: + enabled: true + namespace: kube-system + + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## Enable scraping the kubelet over https. 
For requirements to enable this see + ## https://github.com/prometheus-operator/prometheus-operator/issues/926 + ## + https: true + + ## Enable scraping /metrics/cadvisor from kubelet's service + ## + cAdvisor: true + + ## Enable scraping /metrics/probes from kubelet's service + ## + probes: true + + ## Enable scraping /metrics/resource from kubelet's service + ## This is disabled by default because container metrics are already exposed by cAdvisor + ## + resource: false + # From kubernetes 1.18, /metrics/resource/v1alpha1 renamed to /metrics/resource + resourcePath: "/metrics/resource/v1alpha1" + ## Metric relabellings to apply to samples before ingestion + ## + cAdvisorMetricRelabelings: [] + # - sourceLabels: [__name__, image] + # separator: ; + # regex: container_([a-z_]+); + # replacement: $1 + # action: drop + # - sourceLabels: [__name__] + # separator: ; + # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) + # replacement: $1 + # action: drop + + ## Metric relabellings to apply to samples before ingestion + ## + probesMetricRelabelings: [] + # - sourceLabels: [__name__, image] + # separator: ; + # regex: container_([a-z_]+); + # replacement: $1 + # action: drop + # - sourceLabels: [__name__] + # separator: ; + # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) + # replacement: $1 + # action: drop + + # relabel configs to apply to samples before ingestion. 
+ # metrics_path is required to match upstream rules and charts + ## + cAdvisorRelabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + probesRelabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + resourceRelabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + metricRelabelings: [] + # - sourceLabels: [__name__, image] + # separator: ; + # regex: container_([a-z_]+); + # replacement: $1 + # action: drop + # - sourceLabels: [__name__] + # separator: ; + # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) + # replacement: $1 + # action: drop + + # relabel configs to apply to samples before ingestion. 
+ # metrics_path is required to match upstream rules and charts + ## + relabelings: + - sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +## Component scraping the kube controller manager +## +kubeControllerManager: + enabled: false + + ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + ## If using kubeControllerManager.endpoints only the port and targetPort are used + ## + service: + enabled: true + port: 10252 + targetPort: 10252 + # selector: + # component: kube-controller-manager + + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## Enable scraping kube-controller-manager over https. + ## Requires proper certs (not self-signed) and delegated authentication/authorization checks + ## + https: false + + # Skip TLS certificate validation when scraping + insecureSkipVerify: null + + # Name of the server to use when validating TLS certificate + serverName: null + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +## Component scraping coreDns. Use either this or kubeDns +## +coreDns: + enabled: true + service: + port: 9153 + targetPort: 9153 + # selector: + # k8s-app: kube-dns + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. 
+ ## + interval: "" + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +## Component scraping kubeDns. Use either this or coreDns +## +kubeDns: + enabled: false + service: + dnsmasq: + port: 10054 + targetPort: 10054 + skydns: + port: 10055 + targetPort: 10055 + # selector: + # k8s-app: kube-dns + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + dnsmasqMetricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + dnsmasqRelabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +## Component scraping etcd +## +kubeEtcd: + enabled: false + + ## If your etcd is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + ## Etcd service. 
If using kubeEtcd.endpoints only the port and targetPort are used + ## + service: + enabled: true + port: 2379 + targetPort: 2379 + # selector: + # component: etcd + + ## Configure secure access to the etcd cluster by loading a secret into prometheus and + ## specifying security configuration below. For example, with a secret named etcd-client-cert + ## + ## serviceMonitor: + ## scheme: https + ## insecureSkipVerify: false + ## serverName: localhost + ## caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca + ## certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client + ## keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key + ## + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + scheme: http + insecureSkipVerify: false + serverName: "" + caFile: "" + certFile: "" + keyFile: "" + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + +## Component scraping kube scheduler +## +kubeScheduler: + enabled: false + + ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + ## If using kubeScheduler.endpoints only the port and targetPort are used + ## + service: + enabled: true + port: 10251 + targetPort: 10251 + # selector: + # component: kube-scheduler + + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + ## Enable scraping kube-scheduler over https. 
+ ## Requires proper certs (not self-signed) and delegated authentication/authorization checks + ## + https: false + + ## Skip TLS certificate validation when scraping + insecureSkipVerify: null + + ## Name of the server to use when validating TLS certificate + serverName: null + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + +## Component scraping kube proxy +## +kubeProxy: + enabled: false + + ## If your kube proxy is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + service: + enabled: true + port: 10249 + targetPort: 10249 + # selector: + # k8s-app: kube-proxy + + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## Enable scraping kube-proxy over https. + ## Requires proper certs (not self-signed) and delegated authentication/authorization checks + ## + https: false + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + +## Component scraping kube state metrics +## +kubeStateMetrics: + enabled: true + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. 
+ ## + interval: "" + ## Override serviceMonitor selector + ## + selectorOverride: {} + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +## Configuration for kube-state-metrics subchart +## +kube-state-metrics: + namespaceOverride: "" + rbac: + create: true + podSecurityPolicy: + enabled: true + resources: + limits: + cpu: 100m + memory: 200Mi + requests: + cpu: 100m + memory: 130Mi + +## Deploy node exporter as a daemonset to all nodes +## +nodeExporter: + enabled: true + + ## Use the value configured in prometheus-node-exporter.podLabels + ## + jobLabel: jobLabel + + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## How long until a scrape request times out. If not set, the Prometheus default scrape timeout is used. + ## + scrapeTimeout: "" + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - sourceLabels: [__name__] + # separator: ; + # regex: ^node_mountstats_nfs_(event|operations|transport)_.+ + # replacement: $1 + # action: drop + + ## relabel configs to apply to samples before ingestion. 
+ ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +## Configuration for prometheus-node-exporter subchart +## +prometheus-node-exporter: + namespaceOverride: "" + podLabels: + ## Add the 'node-exporter' label to be used by serviceMonitor to match standard common usage in rules and grafana dashboards + ## + jobLabel: node-exporter + extraArgs: + - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/) + - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ + service: + port: 9796 + targetPort: 9796 + resources: + limits: + cpu: 200m + memory: 50Mi + requests: + cpu: 100m + memory: 30Mi + +## Manages Prometheus and Alertmanager components +## +prometheusOperator: + enabled: true + + ## Prometheus-Operator v0.39.0 and later support TLS natively. + ## + tls: + enabled: true + # Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants + tlsMinVersion: VersionTLS13 + # Users who are deploying this chart in GKE private clusters will need to add firewall rules to expose this port for admissions webhooks + internalPort: 8443 + + ## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted + ## rules from making their way into prometheus and potentially preventing the container from starting + admissionWebhooks: + failurePolicy: Fail + enabled: true + ## A PEM encoded CA bundle which will be used to validate the webhook's server certificate. + ## If unspecified, system trust roots on the apiserver are used. 
+ caBundle: "" + ## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data. + ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own + ## certs ahead of time if you wish. + ## + patch: + enabled: true + image: + repository: rancher/mirrored-jettech-kube-webhook-certgen + tag: v1.5.0 + sha: "" + pullPolicy: IfNotPresent + resources: {} + ## Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + nodeSelector: {} + affinity: {} + tolerations: [] + # Use certmanager to generate webhook certs + certManager: + enabled: false + # issuerRef: + # name: "issuer" + # kind: "ClusterIssuer" + + ## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list). + ## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration + ## + namespaces: {} + # releaseNamespace: true + # additional: + # - kube-system + + ## Namespaces not to scope the interaction of the Prometheus Operator (deny list). + ## + denyNamespaces: [] + + ## Filter namespaces to look for prometheus-operator custom resources + ## + alertmanagerInstanceNamespaces: [] + prometheusInstanceNamespaces: [] + thanosRulerInstanceNamespaces: [] + + ## The clusterDomain value will be added to the cluster.peer option of the alertmanager. + ## Without this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated:9094 (default value) + ## With this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated.namespace.svc.cluster-domain:9094 + ## + # clusterDomain: "cluster.local" + + ## Service account for Alertmanager to use. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + create: true + name: "" + + ## Configuration for Prometheus operator service + ## + service: + annotations: {} + labels: {} + clusterIP: "" + + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30080 + + nodePortTls: 30443 + + ## Additional ports to open for Prometheus service + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services + ## + additionalPorts: [] + + ## Loadbalancer IP + ## Only use if service.type is "LoadBalancer" + ## + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## Service type + ## NodePort, ClusterIP, LoadBalancer + ## + type: ClusterIP + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + ## Labels to add to the operator pod + ## + podLabels: {} + + ## Annotations to add to the operator pod + ## + podAnnotations: {} + + ## Assign a PriorityClassName to pods if set + # priorityClassName: "" + + ## Define Log Format + # Use logfmt (default) or json logging + # logFormat: logfmt + + ## Decrease log verbosity to errors only + # logLevel: error + + ## If true, the operator will create and maintain a service for scraping kubelets + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/helm/prometheus-operator/README.md + ## + kubeletService: + enabled: true + namespace: kube-system + + ## Create a servicemonitor for the operator + ## + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + ## Scrape timeout. If not set, the Prometheus default scrape timeout is used. + scrapeTimeout: "" + selfMonitor: true + + ## metric relabel configs to apply to samples before ingestion. 
+ ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Resource limits & requests + ## + resources: + limits: + cpu: 200m + memory: 500Mi + requests: + cpu: 100m + memory: 100Mi + + # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), + # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working + ## + hostNetwork: false + + ## Define which Nodes the Pods are scheduled on. + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for use with node taints + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + + ## Assign custom affinity rules to the prometheus operator + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/e2e-az-name + # operator: In + # values: + # - e2e-az1 + # - e2e-az2 + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + securityContext: + fsGroup: 65534 + runAsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + + ## Prometheus-operator image + ## + image: + repository: rancher/mirrored-prometheus-operator-prometheus-operator + tag: v0.46.0 + sha: "" + pullPolicy: IfNotPresent + + ## Prometheus image to use 
+ for prometheuses managed by the operator + ## + # prometheusDefaultBaseImage: quay.io/prometheus/prometheus + + ## Alertmanager image to use for alertmanagers managed by the operator + ## + # alertmanagerDefaultBaseImage: quay.io/prometheus/alertmanager + + ## Prometheus-config-reloader image to use for config and rule reloading + ## + prometheusConfigReloaderImage: + repository: rancher/mirrored-prometheus-operator-prometheus-config-reloader + tag: v0.46.0 + sha: "" + + ## Set the prometheus config reloader side-car CPU limit + ## + configReloaderCpu: 100m + + ## Set the prometheus config reloader side-car memory limit + ## + configReloaderMemory: 50Mi + + ## Set a Field Selector to filter watched secrets + ## + secretFieldSelector: "" + +## Deploy a Prometheus instance +## +prometheus: + + enabled: true + + ## Annotations for Prometheus + ## + annotations: {} + + ## Service account for Prometheuses to use. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + create: true + name: "" + + # Service for thanos service discovery on sidecar + # Enabling this allows Thanos Query to use + # `--store=dnssrv+_grpc._tcp.${kube-prometheus-stack.fullname}-thanos-discovery.${namespace}.svc.cluster.local` to discover the + # Thanos sidecar on prometheus nodes + # (Please remember to change ${kube-prometheus-stack.fullname} and ${namespace}. Not just copy and paste!) 
+ thanosService: + enabled: false + annotations: {} + labels: {} + portName: grpc + port: 10901 + targetPort: "grpc" + clusterIP: "None" + + ## Service type + ## + type: ClusterIP + + ## Port to expose on each node + ## + nodePort: 30901 + + ## Service type + ## + type: ClusterIP + + ## Port to expose on each node + ## + nodePort: 30901 + + ## Configuration for Prometheus service + ## + service: + annotations: {} + labels: {} + clusterIP: "" + + ## Port for Prometheus Service to listen on + ## + port: 9090 + + ## To be used with a proxy extraContainer port + targetPort: 8081 + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30090 + + ## Loadbalancer IP + ## Only use if service.type is "LoadBalancer" + loadBalancerIP: "" + loadBalancerSourceRanges: [] + ## Service type + ## + type: ClusterIP + + sessionAffinity: "" + + ## Configuration for creating a separate Service for each statefulset Prometheus replica + ## + servicePerReplica: + enabled: false + annotations: {} + + ## Port for Prometheus Service per replica to listen on + ## + port: 9090 + + ## To be used with a proxy extraContainer port + targetPort: 9090 + + ## Port to expose on each node + ## Only used if servicePerReplica.type is 'NodePort' + ## + nodePort: 30091 + + ## Loadbalancer source IP ranges + ## Only used if servicePerReplica.type is "LoadBalancer" + loadBalancerSourceRanges: [] + ## Service type + ## + type: ClusterIP + + ## Configure pod disruption budgets for Prometheus + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + ## This configuration is immutable once created and will require the PDB to be deleted to be changed + ## https://github.com/kubernetes/kubernetes/issues/45398 + ## + podDisruptionBudget: + enabled: false 
+ minAvailable: 1 + maxUnavailable: "" + + # Ingress exposes thanos sidecar outside the cluster + thanosIngress: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + labels: {} + servicePort: 10901 + + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30901 + + ## Hosts must be provided if Ingress is enabled. + ## + hosts: [] + # - thanos-gateway.domain.com + + ## Paths to use for ingress rules + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## TLS configuration for Thanos Ingress + ## Secret must be manually created in the namespace + ## + tls: [] + # - secretName: thanos-gateway-tls + # hosts: + # - thanos-gateway.domain.com + + ingress: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + labels: {} + + ## Hostnames. + ## Must be provided if Ingress is enabled. 
+ ## + # hosts: + # - prometheus.domain.com + hosts: [] + + ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## TLS configuration for Prometheus Ingress + ## Secret must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-general-tls + # hosts: + # - prometheus.example.com + + ## Configuration for creating an Ingress that will map to each Prometheus replica service + ## prometheus.servicePerReplica must be enabled + ## + ingressPerReplica: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + labels: {} + + ## Final form of the hostname for each per replica ingress is + ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }} + ## + ## Prefix for the per replica ingress that will have `-$replicaNumber` + ## appended to the end + hostPrefix: "" + ## Domain that will be used for the per replica ingress + hostDomain: "" + + ## Paths to use for ingress rules + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## Secret name containing the TLS certificate for Prometheus per replica ingress + ## Secret must be manually created in the namespace + tlsSecretName: 
"" + + ## Separated secret for each per replica Ingress. Can be used together with cert-manager + ## + tlsSecretPerReplica: + enabled: false + ## Final form of the secret for each per replica ingress is + ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }} + ## + prefix: "prometheus" + + ## Configure additional options for default pod security policy for Prometheus + ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + podSecurityPolicy: + allowedCapabilities: [] + allowedHostPaths: [] + volumes: [] + + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + selfMonitor: true + + ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. + scheme: "" + + ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. + ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#tlsconfig + tlsConfig: {} + + bearerTokenFile: + + ## metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Settings affecting prometheusSpec + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#prometheusspec + ## + prometheusSpec: + ## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. 
This is already done if using Thanos + ## + disableCompaction: false + ## APIServerConfig + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#apiserverconfig + ## + apiserverConfig: {} + + ## Interval between consecutive scrapes. + ## Defaults to 30s. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183 + ## + scrapeInterval: "" + + ## Number of seconds to wait for target to respond before erroring + ## + scrapeTimeout: "" + + ## Interval between consecutive evaluations. + ## + evaluationInterval: "" + + ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP. + ## + listenLocal: false + + ## EnableAdminAPI enables Prometheus the administrative HTTP API which includes functionality such as deleting time series. + ## This is disabled by default. + ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis + ## + enableAdminAPI: false + + ## Image of Prometheus. + ## + image: + repository: rancher/mirrored-prometheus-prometheus + tag: v2.24.0 + sha: "" + + ## Tolerations for use with node taints + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + + ## If specified, the pod's topology spread constraints. 
+ ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app: prometheus + + ## Alertmanagers to which alerts will be sent + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#alertmanagerendpoints + ## + ## Default configuration will connect to the alertmanager deployed as part of this release + ## + alertingEndpoints: [] + # - name: "" + # namespace: "" + # port: http + # scheme: http + # pathPrefix: "" + # tlsConfig: {} + # bearerTokenFile: "" + # apiVersion: v2 + + ## External labels to add to any time series or alerts when communicating with external systems + ## + externalLabels: {} + + ## Name of the external label used to denote replica name + ## + replicaExternalLabelName: "" + + ## If true, the Operator won't add the external label used to denote replica name + ## + replicaExternalLabelNameClear: false + + ## Name of the external label used to denote Prometheus instance name + ## + prometheusExternalLabelName: "" + + ## If true, the Operator won't add the external label used to denote Prometheus instance name + ## + prometheusExternalLabelNameClear: false + + ## External URL at which Prometheus will be reachable. + ## + externalUrl: "" + + ## Ignore NamespaceSelector settings from the PodMonitor and ServiceMonitor configs + ## If true, PodMonitors and ServiceMonitors can only discover Pods and Services within the namespace they are deployed into + ## + ignoreNamespaceSelectors: false + + ## Define which Nodes the Pods are scheduled on. + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. 
+ ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not + ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated + ## with the new list of secrets. + ## + secrets: [] + + ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. + ## The ConfigMaps are mounted into /etc/prometheus/configmaps/. + ## + configMaps: [] + + ## QuerySpec defines the query command line flags when starting Prometheus. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#queryspec + ## + query: {} + + ## Namespaces to be selected for PrometheusRules discovery. + ## If nil, select own namespace. Namespaces to be selected for ServiceMonitor discovery. + ## See https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage + ## + ruleNamespaceSelector: {} + + ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the PrometheusRule resources created + ## + ruleSelectorNilUsesHelmValues: false + + ## PrometheusRules to be selected for target discovery. 
+ ## If {}, select all PrometheusRules + ## + ruleSelector: {} + ## Example which select all PrometheusRules resources + ## with label "prometheus" with values any of "example-rules" or "example-rules-2" + # ruleSelector: + # matchExpressions: + # - key: prometheus + # operator: In + # values: + # - example-rules + # - example-rules-2 + # + ## Example which select all PrometheusRules resources with label "role" set to "example-rules" + # ruleSelector: + # matchLabels: + # role: example-rules + + ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the servicemonitors created + ## + serviceMonitorSelectorNilUsesHelmValues: false + + ## ServiceMonitors to be selected for target discovery. + ## If {}, select all ServiceMonitors + ## + serviceMonitorSelector: {} + ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel" + # serviceMonitorSelector: + # matchLabels: + # prometheus: somelabel + + ## Namespaces to be selected for ServiceMonitor discovery. + ## + serviceMonitorNamespaceSelector: {} + ## Example which selects ServiceMonitors in namespaces with label "prometheus" set to "somelabel" + # serviceMonitorNamespaceSelector: + # matchLabels: + # prometheus: somelabel + + ## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the podmonitors created + ## + podMonitorSelectorNilUsesHelmValues: false + + ## PodMonitors to be selected for target discovery. + ## If {}, select all PodMonitors + ## + podMonitorSelector: {} + ## Example which selects PodMonitors with label "prometheus" set to "somelabel" + # podMonitorSelector: + # matchLabels: + # prometheus: somelabel + + ## Namespaces to be selected for PodMonitor discovery. 
+ ## See https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage + ## + podMonitorNamespaceSelector: {} + + ## If true, a nil or {} value for prometheus.prometheusSpec.probeSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the probes created + ## + probeSelectorNilUsesHelmValues: true + + ## Probes to be selected for target discovery. + ## If {}, select all Probes + ## + probeSelector: {} + ## Example which selects Probes with label "prometheus" set to "somelabel" + # probeSelector: + # matchLabels: + # prometheus: somelabel + + ## Namespaces to be selected for Probe discovery. + ## See https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#namespaceselector for usage + ## + probeNamespaceSelector: {} + + ## How long to retain metrics + ## + retention: 10d + + ## Maximum size of metrics + ## + retentionSize: "" + + ## Enable compression of the write-ahead log using Snappy. + ## + walCompression: false + + ## If true, the Operator won't process any Prometheus configuration changes + ## + paused: false + + ## Number of replicas of each shard to deploy for a Prometheus deployment. + ## Number of replicas multiplied by shards is the total number of Pods created. + ## + replicas: 1 + + ## EXPERIMENTAL: Number of shards to distribute targets onto. + ## Number of replicas multiplied by shards is the total number of Pods created. + ## Note that scaling down shards will not reshard data onto remaining instances, it must be manually moved. + ## Increasing shards will not reshard data either but it will continue to be available from the same instances. + ## To query globally use Thanos sidecar and Thanos querier or remote write data to a central location. + ## Sharding is done on the content of the `__address__` target meta-label. 
+ ## + shards: 1 + + ## Log level for Prometheus be configured in + ## + logLevel: info + + ## Log format for Prometheus be configured in + ## + logFormat: logfmt + + ## Prefix used to register routes, overriding externalUrl route. + ## Useful for proxies that rewrite URLs. + ## + routePrefix: / + + ## Standard object’s metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata + ## Metadata Labels and Annotations gets propagated to the prometheus pods. + ## + podMetadata: {} + # labels: + # app: prometheus + # k8s-app: prometheus + + ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node. + ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. + ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. + ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. + podAntiAffinity: "" + + ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity. + ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone + ## + podAntiAffinityTopologyKey: kubernetes.io/hostname + + ## Assign custom affinity rules to the prometheus instance + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/e2e-az-name + # operator: In + # values: + # - e2e-az1 + # - e2e-az2 + + ## The remote_read spec configuration for Prometheus. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotereadspec + remoteRead: [] + # - url: http://remote1/read + + ## The remote_write spec configuration for Prometheus. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#remotewritespec + remoteWrite: [] + # - url: http://remote1/push + + ## Enable/Disable Grafana dashboards provisioning for prometheus remote write feature + remoteWriteDashboards: false + + ## Resource limits & requests + ## + resources: + limits: + memory: 1500Mi + cpu: 1000m + requests: + memory: 750Mi + cpu: 750m + + ## Prometheus StorageSpec for persistent data + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/user-guides/storage.md + ## + storageSpec: {} + ## Using PersistentVolumeClaim + ## + # volumeClaimTemplate: + # spec: + # storageClassName: gluster + # accessModes: ["ReadWriteOnce"] + # resources: + # requests: + # storage: 50Gi + # selector: {} + + ## Using tmpfs volume + ## + # emptyDir: + # medium: Memory + + # Additional volumes on the output StatefulSet definition. + volumes: + - name: nginx-home + emptyDir: {} + - name: prometheus-nginx + configMap: + name: prometheus-nginx-proxy-config + defaultMode: 438 + + # Additional VolumeMounts on the output StatefulSet definition. + volumeMounts: [] + + ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations + ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form + ## as specified in the official Prometheus documentation: + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are + ## appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility + ## to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible + ## scrape configs are going to break Prometheus after the upgrade. 
+ ## + ## The scrape configuration example below will find master nodes, provided they have the name .*mst.*, relabel the + ## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes + ## + additionalScrapeConfigs: [] + # - job_name: kube-etcd + # kubernetes_sd_configs: + # - role: node + # scheme: https + # tls_config: + # ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca + # cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client + # key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key + # relabel_configs: + # - action: labelmap + # regex: __meta_kubernetes_node_label_(.+) + # - source_labels: [__address__] + # action: replace + # targetLabel: __address__ + # regex: ([^:;]+):(\d+) + # replacement: ${1}:2379 + # - source_labels: [__meta_kubernetes_node_name] + # action: keep + # regex: .*mst.* + # - source_labels: [__meta_kubernetes_node_name] + # action: replace + # targetLabel: node + # regex: (.*) + # replacement: ${1} + # metric_relabel_configs: + # - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone) + # action: labeldrop + + ## If additional scrape configurations are already deployed in a single secret file you can use this section. + ## Expected values are the secret name and key + ## Cannot be used with additionalScrapeConfigs + additionalScrapeConfigsSecret: {} + # enabled: false + # name: + # key: + + ## additionalPrometheusSecretsAnnotations allows to add annotations to the kubernetes secret. 
This can be useful + ## when deploying via spinnaker to disable versioning on the secret, strategy.spinnaker.io/versioned: 'false' + additionalPrometheusSecretsAnnotations: {} + + ## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified + ## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#. + ## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator. + ## As AlertManager configs are appended, the user is responsible to make sure it is valid. Note that using this + ## feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release + ## notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade. + ## + additionalAlertManagerConfigs: [] + # - consul_sd_configs: + # - server: consul.dev.test:8500 + # scheme: http + # datacenter: dev + # tag_separator: ',' + # services: + # - metrics-prometheus-alertmanager + + ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended + ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the + ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. + ## As alert relabel configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the + ## possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel + ## configs are going to break Prometheus after the upgrade. 
+ ## + additionalAlertRelabelConfigs: [] + # - separator: ; + # regex: prometheus_replica + # replacement: $1 + # action: labeldrop + + ## SecurityContext holds pod-level security attributes and common container settings. + ## This defaults to non root user with uid 1000 and gid 2000. + ## https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md + ## + securityContext: + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + + ## Priority class assigned to the Pods + ## + priorityClassName: "" + + ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment. + ## This section is experimental, it may change significantly without deprecation notice in any release. + ## This is experimental and may change significantly without backward compatibility in any release. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#thanosspec + ## + thanos: {} + + proxy: + image: + repository: rancher/mirrored-library-nginx + tag: 1.19.2-alpine + + ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod. + ## if using proxy extraContainer update targetPort with proxy container port + containers: | + - name: prometheus-proxy + args: + - nginx + - -g + - daemon off; + - -c + - /nginx/nginx.conf + image: "{{ template "system_default_registry" . }}{{ .Values.prometheus.prometheusSpec.proxy.image.repository }}:{{ .Values.prometheus.prometheusSpec.proxy.image.tag }}" + ports: + - containerPort: 8081 + name: nginx-http + protocol: TCP + volumeMounts: + - mountPath: /nginx + name: prometheus-nginx + - mountPath: /var/cache/nginx + name: nginx-home + securityContext: + runAsUser: 101 + runAsGroup: 101 + + ## InitContainers allows injecting additional initContainers. 
This is meant to allow doing some changes + ## (permissions, dir tree) on mounted volumes before starting prometheus + initContainers: [] + + ## PortName to use for Prometheus. + ## + portName: "nginx-http" + + ## ArbitraryFSAccessThroughSMs configures whether configuration based on a service monitor can access arbitrary files + ## on the file system of the Prometheus container e.g. bearer token files. + arbitraryFSAccessThroughSMs: false + + ## OverrideHonorLabels if set to true overrides all user configured honor_labels. If HonorLabels is set in ServiceMonitor + ## or PodMonitor to true, this overrides honor_labels to false. + overrideHonorLabels: false + + ## OverrideHonorTimestamps allows to globally enforce honoring timestamps in all scrape configs. + overrideHonorTimestamps: false + + ## IgnoreNamespaceSelectors if set to true will ignore NamespaceSelector settings from the podmonitor and servicemonitor + ## configs, and they will only discover endpoints within their current namespace. Defaults to false. + ignoreNamespaceSelectors: false + + ## EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert and metric that is user created. + ## The label value will always be the namespace of the object that is being created. + ## Disabled by default + enforcedNamespaceLabel: "" + + ## PrometheusRulesExcludedFromEnforce - list of prometheus rules to be excluded from enforcing of adding namespace labels. + ## Works only if enforcedNamespaceLabel set to true. Make sure both ruleNamespace and ruleName are set for each pair + prometheusRulesExcludedFromEnforce: [] + + ## QueryLogFile specifies the file to which PromQL queries are logged. Note that this location must be writable, + ## and can be persisted using an attached volume. Alternatively, the location can be set to a stdout location such + ## as /dev/stdout to log querie information to the default Prometheus log stream. This is only available in versions + ## of Prometheus >= 2.16.0. 
For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/) + queryLogFile: false + + ## EnforcedSampleLimit defines global limit on number of scraped samples that will be accepted. This overrides any SampleLimit + ## set per ServiceMonitor or/and PodMonitor. It is meant to be used by admins to enforce the SampleLimit to keep overall + ## number of samples/series under the desired limit. Note that if SampleLimit is lower that value will be taken instead. + enforcedSampleLimit: false + + ## AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. This is still experimental + ## in Prometheus so it may change in any upcoming release. + allowOverlappingBlocks: false + + additionalRulesForClusterRole: [] + # - apiGroups: [ "" ] + # resources: + # - nodes/proxy + # verbs: [ "get", "list", "watch" ] + + additionalServiceMonitors: [] + ## Name of the ServiceMonitor to create + ## + # - name: "" + + ## Additional labels to set used for the ServiceMonitorSelector. Together with standard labels from + ## the chart + ## + # additionalLabels: {} + + ## Service label for use in assembling a job name of the form