From 7d8c8c35ab3a0aa84524728461bd6aa3de066b17 Mon Sep 17 00:00:00 2001 From: Samuel Attwood Date: Tue, 26 Apr 2022 14:55:57 -0400 Subject: [PATCH] Updating assets, charts, and index.yaml --- .../hpe-csi-driver/hpe-csi-driver-2.1.1.tgz | Bin 0 -> 15305 bytes assets/k10/k10-4.5.1400.tgz | Bin 0 -> 114036 bytes .../hpe-csi-driver/2.1.1/Chart.yaml | 27 + .../hpe-csi-driver/2.1.1/README.md | 154 + .../hpe-csi-driver/2.1.1/app-readme.md | 3 + .../2.1.1/crds/hpe-nodeinfo-crd.yaml | 70 + .../crds/hpereplicated_deviceinfo_v2_crd.yaml | 115 + .../crds/hpevolumegroupinfos_v2_crd.yaml | 124 + .../2.1.1/crds/hpevolumeinfos_v2_crd.yaml | 68 + .../2.1.1/crds/snapshotgroupinfos_v2_crd.yaml | 112 + .../storage.hpe.com_snapshotgroupclasses.yaml | 60 + ...storage.hpe.com_snapshotgroupcontents.yaml | 104 + .../crds/storage.hpe.com_snapshotgroups.yaml | 83 + .../storage.hpe.com_volumegroupclasses.yaml | 60 + .../storage.hpe.com_volumegroupcontents.yaml | 96 + .../crds/storage.hpe.com_volumegroups.yaml | 69 + .../hpe-csi-driver/2.1.1/files/config.json | 128 + .../hpe-csi-driver/2.1.1/questions.yml | 87 + .../hpe-csi-driver/2.1.1/templates/NOTES.txt | 0 .../2.1.1/templates/_helpers.tpl | 32 + .../2.1.1/templates/csi-driver-crd.yaml | 24 + .../2.1.1/templates/hpe-csi-controller.yaml | 240 ++ .../2.1.1/templates/hpe-csi-node.yaml | 201 ++ .../2.1.1/templates/hpe-csi-rbac.yaml | 565 +++ .../2.1.1/templates/hpe-linux-config.yaml | 13 + .../2.1.1/templates/nimble-csp.yaml | 87 + .../2.1.1/templates/primera-3par-csp.yaml | 94 + .../hpe-csi-driver/2.1.1/values.schema.json | 159 + .../hpe-csi-driver/2.1.1/values.yaml | 34 + charts/k10/k10/4.5.1400/Chart.yaml | 15 + charts/k10/k10/4.5.1400/README.md | 227 ++ charts/k10/k10/4.5.1400/app-readme.md | 5 + .../k10/4.5.1400/charts/grafana/.helmignore | 23 + .../k10/4.5.1400/charts/grafana/Chart.yaml | 22 + .../k10/k10/4.5.1400/charts/grafana/README.md | 528 +++ .../charts/grafana/templates/NOTES.txt | 54 + .../charts/grafana/templates/_definitions.tpl 
| 3 + .../charts/grafana/templates/_helpers.tpl | 235 ++ .../charts/grafana/templates/_pod.tpl | 509 +++ .../charts/grafana/templates/clusterrole.yaml | 27 + .../grafana/templates/clusterrolebinding.yaml | 26 + .../configmap-dashboard-provider.yaml | 31 + .../charts/grafana/templates/configmap.yaml | 99 + .../templates/dashboards-json-configmap.yaml | 37 + .../charts/grafana/templates/deployment.yaml | 52 + .../grafana/templates/headless-service.yaml | 20 + .../charts/grafana/templates/hpa.yaml | 22 + .../templates/image-renderer-deployment.yaml | 117 + .../image-renderer-network-policy.yaml | 78 + .../templates/image-renderer-service.yaml | 32 + .../charts/grafana/templates/ingress.yaml | 80 + .../grafana/templates/networkpolicy.yaml | 18 + .../templates/poddisruptionbudget.yaml | 24 + .../grafana/templates/podsecuritypolicy.yaml | 51 + .../charts/grafana/templates/pvc.yaml | 33 + .../charts/grafana/templates/role.yaml | 34 + .../charts/grafana/templates/rolebinding.yaml | 27 + .../charts/grafana/templates/secret-env.yaml | 16 + .../charts/grafana/templates/secret.yaml | 28 + .../charts/grafana/templates/service.yaml | 59 + .../grafana/templates/serviceaccount.yaml | 15 + .../grafana/templates/servicemonitor.yaml | 42 + .../charts/grafana/templates/statefulset.yaml | 55 + .../k10/4.5.1400/charts/grafana/values.yaml | 3126 +++++++++++++++++ .../k10/4.5.1400/charts/prometheus/Chart.yaml | 30 + .../k10/4.5.1400/charts/prometheus/README.md | 224 ++ .../charts/prometheus/templates/NOTES.txt | 112 + .../prometheus/templates/_definitions.tpl | 3 + .../charts/prometheus/templates/_helpers.tpl | 400 +++ .../templates/alertmanager/clusterrole.yaml | 21 + .../alertmanager/clusterrolebinding.yaml | 20 + .../prometheus/templates/alertmanager/cm.yaml | 19 + .../templates/alertmanager/deploy.yaml | 161 + .../templates/alertmanager/headless-svc.yaml | 31 + .../templates/alertmanager/ingress.yaml | 57 + .../templates/alertmanager/netpol.yaml | 20 + 
.../templates/alertmanager/pdb.yaml | 14 + .../templates/alertmanager/psp.yaml | 46 + .../templates/alertmanager/pvc.yaml | 39 + .../templates/alertmanager/role.yaml | 24 + .../templates/alertmanager/rolebinding.yaml | 23 + .../templates/alertmanager/service.yaml | 53 + .../alertmanager/serviceaccount.yaml | 11 + .../templates/alertmanager/sts.yaml | 187 + .../templates/node-exporter/daemonset.yaml | 146 + .../templates/node-exporter/psp.yaml | 55 + .../templates/node-exporter/role.yaml | 17 + .../templates/node-exporter/rolebinding.yaml | 19 + .../node-exporter/serviceaccount.yaml | 11 + .../templates/node-exporter/svc.yaml | 47 + .../templates/pushgateway/clusterrole.yaml | 21 + .../pushgateway/clusterrolebinding.yaml | 16 + .../templates/pushgateway/deploy.yaml | 119 + .../templates/pushgateway/ingress.yaml | 54 + .../templates/pushgateway/netpol.yaml | 20 + .../prometheus/templates/pushgateway/pdb.yaml | 14 + .../prometheus/templates/pushgateway/psp.yaml | 42 + .../prometheus/templates/pushgateway/pvc.yaml | 37 + .../templates/pushgateway/service.yaml | 41 + .../templates/pushgateway/serviceaccount.yaml | 11 + .../templates/server/clusterrole.yaml | 48 + .../templates/server/clusterrolebinding.yaml | 16 + .../prometheus/templates/server/cm.yaml | 82 + .../prometheus/templates/server/deploy.yaml | 261 ++ .../templates/server/headless-svc.yaml | 37 + .../prometheus/templates/server/ingress.yaml | 59 + .../prometheus/templates/server/netpol.yaml | 18 + .../prometheus/templates/server/pdb.yaml | 14 + .../prometheus/templates/server/psp.yaml | 51 + .../prometheus/templates/server/pvc.yaml | 41 + .../templates/server/rolebinding.yaml | 20 + .../prometheus/templates/server/service.yaml | 60 + .../templates/server/serviceaccount.yaml | 13 + .../prometheus/templates/server/sts.yaml | 285 ++ .../prometheus/templates/server/vpa.yaml | 24 + .../4.5.1400/charts/prometheus/values.yaml | 1737 +++++++++ charts/k10/k10/4.5.1400/config.json | 0 charts/k10/k10/4.5.1400/eula.txt | 
459 +++ charts/k10/k10/4.5.1400/files/favicon.png | Bin 0 -> 1802 bytes charts/k10/k10/4.5.1400/files/kasten-logo.svg | 24 + charts/k10/k10/4.5.1400/files/styles.css | 113 + charts/k10/k10/4.5.1400/license | 1 + charts/k10/k10/4.5.1400/questions.yaml | 295 ++ charts/k10/k10/4.5.1400/templates/NOTES.txt | 47 + .../k10/4.5.1400/templates/_definitions.tpl | 184 + .../k10/k10/4.5.1400/templates/_helpers.tpl | 647 ++++ .../k10/4.5.1400/templates/_k10_container.tpl | 659 ++++ .../k10/4.5.1400/templates/_k10_metering.tpl | 261 ++ .../4.5.1400/templates/_k10_serviceimage.tpl | 51 + .../k10/4.5.1400/templates/_k10_template.tpl | 190 + .../4.5.1400/templates/api-tls-secrets.yaml | 13 + .../k10/4.5.1400/templates/apiservice.yaml | 25 + .../k10/4.5.1400/templates/daemonsets.yaml | 26 + .../k10/4.5.1400/templates/deployments.yaml | 30 + .../templates/fluentbit-configmap.yaml | 34 + .../k10/4.5.1400/templates/gateway-ext.yaml | 33 + .../k10/k10/4.5.1400/templates/gateway.yaml | 144 + .../k10/4.5.1400/templates/grafana-scc.yaml | 44 + .../k10/k10/4.5.1400/templates/ingress.yaml | 46 + .../k10/4.5.1400/templates/k10-config.yaml | 230 ++ .../k10/k10/4.5.1400/templates/k10-eula.yaml | 21 + .../4.5.1400/templates/kopia-tls-certs.yaml | 33 + .../k10/k10/4.5.1400/templates/license.yaml | 25 + .../4.5.1400/templates/mutatingwebhook.yaml | 51 + .../k10/4.5.1400/templates/networkpolicy.yaml | 192 + .../templates/prometheus-configmap.yaml | 70 + .../templates/prometheus-service.yaml | 45 + charts/k10/k10/4.5.1400/templates/rbac.yaml | 239 ++ charts/k10/k10/4.5.1400/templates/route.yaml | 36 + charts/k10/k10/4.5.1400/templates/scc.yaml | 43 + .../k10/k10/4.5.1400/templates/secrets.yaml | 242 ++ .../4.5.1400/templates/serviceaccount.yaml | 27 + .../k10/4.5.1400/templates/v0services.yaml | 165 + charts/k10/k10/4.5.1400/triallicense | 1 + charts/k10/k10/4.5.1400/values.schema.json | 1089 ++++++ charts/k10/k10/4.5.1400/values.yaml | 456 +++ index.yaml | 50 + 157 files changed, 19851 
insertions(+) create mode 100644 assets/hpe-csi-driver/hpe-csi-driver-2.1.1.tgz create mode 100644 assets/k10/k10-4.5.1400.tgz create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/Chart.yaml create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/README.md create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/app-readme.md create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/hpe-nodeinfo-crd.yaml create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/hpereplicated_deviceinfo_v2_crd.yaml create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/hpevolumegroupinfos_v2_crd.yaml create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/hpevolumeinfos_v2_crd.yaml create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/snapshotgroupinfos_v2_crd.yaml create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_snapshotgroupclasses.yaml create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_snapshotgroupcontents.yaml create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_snapshotgroups.yaml create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_volumegroupclasses.yaml create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_volumegroupcontents.yaml create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_volumegroups.yaml create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/files/config.json create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/questions.yml create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/NOTES.txt create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/_helpers.tpl create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/csi-driver-crd.yaml create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/hpe-csi-controller.yaml create mode 100644 
charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/hpe-csi-node.yaml create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/hpe-csi-rbac.yaml create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/hpe-linux-config.yaml create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/nimble-csp.yaml create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/primera-3par-csp.yaml create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/values.schema.json create mode 100644 charts/hpe-csi-driver/hpe-csi-driver/2.1.1/values.yaml create mode 100644 charts/k10/k10/4.5.1400/Chart.yaml create mode 100644 charts/k10/k10/4.5.1400/README.md create mode 100644 charts/k10/k10/4.5.1400/app-readme.md create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/.helmignore create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/Chart.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/README.md create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/NOTES.txt create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/_definitions.tpl create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/_helpers.tpl create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/_pod.tpl create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/clusterrole.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/clusterrolebinding.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/configmap-dashboard-provider.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/configmap.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/dashboards-json-configmap.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/deployment.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/headless-service.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/hpa.yaml 
create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/image-renderer-deployment.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/image-renderer-network-policy.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/image-renderer-service.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/ingress.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/networkpolicy.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/poddisruptionbudget.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/podsecuritypolicy.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/pvc.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/role.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/rolebinding.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/secret-env.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/secret.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/service.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/serviceaccount.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/servicemonitor.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/templates/statefulset.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/grafana/values.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/Chart.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/README.md create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/NOTES.txt create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/_definitions.tpl create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/_helpers.tpl create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/clusterrole.yaml create mode 100644 
charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/clusterrolebinding.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/cm.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/deploy.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/headless-svc.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/ingress.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/netpol.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/pdb.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/psp.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/pvc.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/role.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/rolebinding.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/service.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/serviceaccount.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/sts.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/daemonset.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/psp.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/role.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/rolebinding.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/serviceaccount.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/svc.yaml create mode 100644 
charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/clusterrole.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/clusterrolebinding.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/deploy.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/ingress.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/netpol.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/pdb.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/psp.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/pvc.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/service.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/serviceaccount.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/server/clusterrole.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/server/clusterrolebinding.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/server/cm.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/server/deploy.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/server/headless-svc.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/server/ingress.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/server/netpol.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/server/pdb.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/server/psp.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/server/pvc.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/server/rolebinding.yaml create mode 100644 
charts/k10/k10/4.5.1400/charts/prometheus/templates/server/service.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/server/serviceaccount.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/server/sts.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/templates/server/vpa.yaml create mode 100644 charts/k10/k10/4.5.1400/charts/prometheus/values.yaml create mode 100644 charts/k10/k10/4.5.1400/config.json create mode 100644 charts/k10/k10/4.5.1400/eula.txt create mode 100644 charts/k10/k10/4.5.1400/files/favicon.png create mode 100644 charts/k10/k10/4.5.1400/files/kasten-logo.svg create mode 100644 charts/k10/k10/4.5.1400/files/styles.css create mode 100644 charts/k10/k10/4.5.1400/license create mode 100644 charts/k10/k10/4.5.1400/questions.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/NOTES.txt create mode 100644 charts/k10/k10/4.5.1400/templates/_definitions.tpl create mode 100644 charts/k10/k10/4.5.1400/templates/_helpers.tpl create mode 100644 charts/k10/k10/4.5.1400/templates/_k10_container.tpl create mode 100644 charts/k10/k10/4.5.1400/templates/_k10_metering.tpl create mode 100644 charts/k10/k10/4.5.1400/templates/_k10_serviceimage.tpl create mode 100644 charts/k10/k10/4.5.1400/templates/_k10_template.tpl create mode 100644 charts/k10/k10/4.5.1400/templates/api-tls-secrets.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/apiservice.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/daemonsets.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/deployments.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/fluentbit-configmap.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/gateway-ext.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/gateway.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/grafana-scc.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/ingress.yaml create mode 100644 
charts/k10/k10/4.5.1400/templates/k10-config.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/k10-eula.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/kopia-tls-certs.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/license.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/mutatingwebhook.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/networkpolicy.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/prometheus-configmap.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/prometheus-service.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/rbac.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/route.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/scc.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/secrets.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/serviceaccount.yaml create mode 100644 charts/k10/k10/4.5.1400/templates/v0services.yaml create mode 100644 charts/k10/k10/4.5.1400/triallicense create mode 100644 charts/k10/k10/4.5.1400/values.schema.json create mode 100644 charts/k10/k10/4.5.1400/values.yaml diff --git a/assets/hpe-csi-driver/hpe-csi-driver-2.1.1.tgz b/assets/hpe-csi-driver/hpe-csi-driver-2.1.1.tgz new file mode 100644 index 0000000000000000000000000000000000000000..2290addfde1ebe915f738ebd10455608d1ba1e5b GIT binary patch literal 15305 zcmV;)J2u20iwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0PKBhciXm>==rQ)v9H`&jXT!EcAT_YH?zjI-PUsw$73Zu_e`c^ zLnI{Om?T&Ll%v!3zu$!iDez4_EIS?XL+gm(#@-tn`-#Ui#2tr|j?2glv)vad5!#08T^XOmXL^+b?Pq-17na z-U0yVK!AKafOG0ZJ`OZ!^#&kf-T~Sn(shs!9#$~0{ElFE03l)`P_XjNT*5;S%{v;=pRO<9Y^Nx!{kIv;N@}7^lz^vy|hxZWYSn!@1yfB!w?(zJAGM5ip9f@r>C;RiBLhpCK>8e8UEyR{y`u>HnjX!=3)$#8REoe3%jrgad-VpT2wB8B-=FBX(g-Jlt(bc#ek{h8bc6jXcaj zP#9qdrP^?T1cJhxMhpb@Lk|heyRFt9oY6oq>cQFY8itt7h=TzIE}Spsr@~FDL^B;B zWYmou!CNljvMXu90^-ZRK-q~FK*)%X8Cv{+JP!+oUiJI^+>hT`KeqPZ2g<ob>$26C^}p3L$dtB}PVnYpZ9lx}q*Vqk-a7;9v_r@zDfdN1k_0J>tv( 
zg`r0zD=Cv0Wye7^=dH|F18854uc)|YnBzdS6*Ij^s%v;4F182u?5ERfNyQWgf;bvW zMT+E{GezM?j@ba(ZS&7-#JN=L>ZjHoyrmQH@CGyWIQZxC>7^K zQqh|s*7L}yXWsM_2ss=-V(~NeA|G>s1cwLWc~V3jDF!Z#=HTi{aR3|(5G=w{h|PE^ z%PgCMJ;Djdp#+%>@>9?6x~EwGb7zWu)cpsi!3O%M*8hjcFAtA0`v2tQXy^ZL<@rtd z=IyWah_?sOmM+ks*OO!4F>*lJq~|g;7M=d)Yw-!Wr zShlZpY_-8rp(y;v@ck7GQ(cOAs9@01Kd>X>U(Jkd55Pnl0KX-F=@xpjwg;IP>ZLXt zZ8L8V@(qj~sc2ytmikuTS*=oy{x~SaI%7nNJ7{N2Lpy65vdyLHFXl&V3?ztl0U5S+ zXd8=BPDE@A+OaXnypOeg+fw(LCTZ>7E>!R3tV?U>9ODhXZH9l12*Z-1PpvE*3w+tA zSS_+cVZTyrw|+?2WBrz}#QH76gJOI#q^dT*82Um+Lm4Dvo*MPisS!`_z24%>UY;G0 zn%mNYTX=2CiHjY&5pwQv-~#^ac##aBaznO=jwPs5X_w76wj*f4aqr&MA=i$eyBaN*dizTTwGxTUB3JYd zY6R5&sgy>@nWZb$G)`cYeqv;4M5%{SR*sWq3!!Q>QjnII^=K|1y*$y^HEkb{+O*`a zM{%`K=BYl_?V<96M<@Ne4U+!dvZPmjup!bbT?6F0M5=q_UE8Xl&^|+@L)6@Km7DD} zqs~ph7f=_2Q~p85YU$5NFv)!=9bIX%R0L^abmaOZkft69%4C>nY#GE_J5DZWvxfs7 zF%yGQCIevI3%AXojA;aRCA5fRz@G(X0rFtvQRhBRpOo6i;_)sW=RYdEo<^fm?kYjX zvA6^~O8{Zyc@P$VWN6`nkzi6zN~M2{gUFXQQ#v9KXwKUQ+4j)B!ZXa;@Jlf}3LWfX zCS;t#H0fk0iC5_xhhY~7f*_ASwu4g>1VDWt084u#NuPB=$69WMMcER1S7-gKA1aWA zwCKu_i@2PHeyvYYD6>f`OLl8{o#)#6^d_Jh(Hz#a&Zy4sv3S5ZfWsrD><(Y4yfbnH zW?5-pgKcH5IucS($|nEw)$jlF33dK?+WD`3=evKs-GA}T|NP5Wd;k7_e);Y2=-}|x zA72&lezSJYikTazkAe^~JsLwSl+o~#6fPYX)fjdDb!By06x}b>sD3Ly7)2BLhhRvW zQ)oXR7Rdi&#%QTJwbBF)SW*x{r&%IRj-eOEjQW7UCF3z>GHi{%1~ElJ98%U8{^8}1 zaoiBY4KQ}&3W!f6aoE+J-G9rw6NhyPm9NE=@I{|wtbYg*Lq3*X0^xEd1QYC*n(f-L zNfPNryv^lT8gRHYACcyBaninqm+u2_Zb;U=AX;4SGr(YqIEeFE1wv8 zNJtkfNfWTunV8KwWt-Ycq*u)9s=U;4PHg7$IzLvjMNah~_o=F9)Qrew05ihE{0w4S*+hjP5oc}ovr`$k9YN-Z9Jvpe>=rqh#BvS zaNVUKHRFHOKRL;a|Erg~^?%!We*3NW4a|r?P?hd6@vzjuZ+yuXXNm{#P4ACCTC!E^ z;&Ui{a;dpx6chW8YGFvXW!iP5t3(0}?O3wnY=eq?ptESDR-JV9iqk6zu=}&FKFgNx zXP7bKV)z{divkB;9jiZyKa9p>@)_El1uz`A>Tiwb8N<@rprns+`FO2VTel?VBr9I>! 
zcJ{li%Q2XBgBplgoyX1EKlK|C$x4KXOFTIsg}lMGmc_Ddn$2XKa5a9Ja5ipJ+Mp5Z zJ-&eSl_F#CrupbvcqSatP`WpoUHan$*VL~|BN#Fgh%vPPg?Ijiw=>Xmzsty5+21KS zv<6sCAG2;y4z3zxBQd&_hep_IFwm-XyOb=O>-ulA_!52xhC>fI7~22ZhW2fHDSDq; zS?!*UKc)76@ngqf?piBC>(+lBzj~Rm|F2J8@78~A*49#$ zy}-Xh_Y8$-L_8v+sqSAF>%6<~&_5`H_F;P;Ucdwcyhy2+e{ENNO9IMZAKHiCw)f$W zKT^x;V=*#NN-7U(e1pxQ8Om3s!sdlI+%1eaw(&kTtwt{Q1;#?m0cB`agaVuYbmDWgAef|BsIP{mlB$qyEd2 zlb!zG##5~PtDaPAh}Z3Vn8meOC~aL2+o-C=TCbFXMPbOLzR&BgytJBEJVXv2z;C~$ zoqhGQ)C#e(lWs9_5Ffx{3xH!!C8-B0bLb=COy5e^oDWQz5rtt%r|}FjpIVs8$4fz} zf}}#z;PHz~h||X5YgPgmLjVBBnp-PZkZ&XfP&| z1^;Q7Y<-NMcn4$PQo&Q%XSFzfAlYJmUTm>j0O%|%YA0lLMkG_L5lxRm92CcupM5^J55Eb2&3qkOv+JKZ=Xwy>zl z#U`ndm4nmnsM&#BmtoG|eD&h={QTx(INWdN8_wRGq<`J={5j4Jx4}ygAJa&@IqvfU ztmXW`tRtojBbUE9e3hpm$0;1lvW(JFY`BxDb(@Khl)@ZNdYn4^R$-mn@OWZio6EevPQe2#R(Cl}nl%T#d8Fq~j8nLED!fT_Z8AN4;=5BcXDl zPd1n1{-yZ8+{#35(Bo!lXgGy)Gqdr;wg(I-OAV1Jtai=9SzQ_2FKl8hw~U+Zmjl$R zkCn};1oujU)2*s!{^7UTslS`nD^F`4ppZ|g5Z27zuBJcgXO+QL^@_!S!R(~l?;dVJ zwKVH=IykxA)(wg?c#-9C!QOY3}`@!sROMX&)ha08fkprFG6f1KZ3b@~a!ZT=61Gc8LO+%bN zfMzh$2hXG>qfWUm0jfyIpg_uS;D(e0;>}kt&W4w_7gy)k?=P=D6v^4xLo>?W+$Rev z3fhF!?f5hxg0kwj9TEu4X2^SU+;1ekHg?4#90la_px0YPk{8{)egEU_+l!ws-WK;^ zjd8qYyI{n=pgCa!_UNPuR5~gA`uhyMUd`B+KJ=l9&~{=Xaw(zb?rL%v%rD$U-%blf_hcLHeoah z3**>F61!Z7Qec({;mQTDDde3fK4(#^EPOnMD5i=78x_TR$U9MdW}>(t>(PWS-HjBC zqqRh?5j-Vm0IP~x6GweJ&U1y}{jc+*V-eQ96%>*SJHdV^=IWHY-}0eo-V2xBszN7) zXISi#7rWz?AWD4mD!;rIUXhbt;h;IJCt#=-9&H0Zf$A_=8>%WhVk9O<-w(%^p{x7rg?~1y3tu=qiYCnX1 zsgssn>?cKY$z^`aE$lNLtg)oeK+w7nH8(~Z^fS#`I6Pi|cn zcsAv-s!qheuBa1_ik4>T;rZ*w?=Nnz-k)FGUY)+XC}O}5jQWGj3((t&Z}2$(M%t9r zts?mb>SG;nN}JM^Q%P#)9FKCW%bzB1&N^Ev64nD)6A8ywFZ*$7&Q*i3n-+zcg(cYS z)nGSSgthdXc{1v5&0?(fDYVnEY^`#JGZ= z4y@Rqbb|g&Zu(iL@Icofxw?dCRNACjCgp0-Be_tAICDu z**S?^cL1N-!@m!2PtV_7UbTNoH9($6AFgBI7u-SmdX`)-k8I^a>hg|im;bS40bz5zb$2xtvSX$Yd-dsE@ z{CK@ZHCQ+uE0taYsmbaX39)1$&eAJ(_`C{0_H8nBQJ|9!Zk4MT1-+tH78&(GBnhUK z`G4^cks$iq(KU|Fn0Q&Swn5TRr0$f{n?pnts4M%%WTI|N$pGNdNPnJhQd&5waI1@o 
zC26};txR-TrAfSa8#~dpPI9^)l@AU}A(^u1;zmI+;%&9sU#s|9s{hw15f`fT zN`OWBy;8T=qF18mi)~dUG`ndN8=-%Fhr%?pMOwrD*fiM{vv7}QR}}i z`>#)O=YJg?zTVY;xACa@uP(z5Qw7NEP1a4$T+t=UCiU2s)vqb|-B1swI#rT~A~B_m z{8P&UNibgTuN9VTJ2$cs;kLLvE)(J15t_|!Ie0*2%JqTy4UV~1hE&8rz^BlYtH ztGhlt!i9R(08+w*N?-@jc9f^p7%X+GAvT0#62C|f_GWR^X39rl8QA5(g~{<=Ce*1x z_5lfJYIURT8}P9HD+=8;x^_H7{9ONwe#$U0y)H0WZj>IWM-(O_lW`H>l9r)}+v)yN zx#lL5lDc&gv5x3z67)GGg>#@@^27M21dkGX_dN+*5==I2_C^LQkJLp_#`qOl`E;03 zd~jG3gDx$tsh8!gdaa3uWRNQ8e@v$#K9**Ak6~r&l-DbpaR90m;ufWUGrjk?Kq6WJ zi6Yy-6rZa-!KBATXFlBml(g@~iIfl$jA?TzS>qU#l?ga5v1)2fK&_yxNT!uFTv|3w zmrHWhocsIe>OWCn&t3PYHEl!1|3og=cUshbO}HX2aXsP{i#MDd8eeNndy zli1jCSQ(!Z3tlmln~pYC;J~8{O^ja&M^*@}9|z6#W(W$2wY#&zoJL$wKmJKwbfRu& z+e~)n@eQu6!JoU$%~_jqiE;w0O;61p}r(uB~aRDO?`Z7Zb50u@*0})fW{`rg_ z!1d4k0ETD0yIG?hU!NVH%JnD~ds(Zq?YMId%e__))%Ih(aPwLF1dpe}tOMq>C_gv7 zRXpLANfI^D8B(*U#qvM`a3ZFz{Y_|S?MZd_Lh3sjcyl1*eef`HG2>Gbg4#z&XaIx* z4kU2bjMaMDrjCE5EON$^QkCRT%3KoIeHRI>>aJPFqsZ#og&kXXA4zHz#SM3VKB0V9 z3$~`ASq!Xgp}8$VZ6=lS%%d=*OqgYW>dczQ30bWUstsVQcg;eya*tENTEX9|O0=(u zO_Pen_K{k`K5Yx9Wizu|9lh<~w#BOdGtJXHUg3MwzTmdH9?@+`b+NYNkb?FMj86)m=i@x2(RwcKLwfL@ zjRR3QhC3Hx=R)jUh%elQ*h*-+-I#N$!Q^(Ub5C6M`ZB6>kF_HAOQ`SGOYSxce_ux2 zP~d(q6*-s-0iLS#CP^?T?5b{SSKXenBKC#2V@m8FtLPR>@`jbhp1QqSY6mqT0~&N( zJVudcm9x}eja5uD9g~}!8`dmebb-!h&NHmdZM-yR3&Qg(&DGTAvIEvknCln$Zo=n? 
z55-(Fdo~U6a;v5vHSaPFu{H(I2Op_NiKE6vpXTs^BJmwZQD#Qzol`t^B2PAl2vs6L*@A&_Ng;#oB>jO{zt!ma&nSB|Kq5C zczn1!|6?1E74F7o5H*v$%!x!zb?kJ;7{P?0@yxQjdw3x;0x`~Ac(+1uNLQ~f)r7iYuk*l@w#-d@_2 z)oT)~4{gh4PUAd_eesIAbfpMbe8WmeS?uA+KL_w_|66@#S;(lM4)q4`;p{qhMT`Or zi^)*5C#Hyb`io=>%ibPb>0j1l*aDOFc$I;(5jK@@DP|L8@S;Y}_KRC+!232gB^wv# z)L5K6PkwB8n~;U|W85W_0&nsQd;o{5-7A!W*7W|M@VQ?dWR|DZ-X2tSS5JmBDRMV- z@q-c6j#4lva65}W*S`s3dEgsAo;{iTi>I^T`E^iqwg{g>JJzkj?t|8px(z5oB6@&7aK|96}7 z{=eI#_y66-y?-+k;)zhWExw3<<~y5r*9%bV@-Bnbdb}$F_9(n7U|mgE@LhRWV1VzZ zqSMU<23g$e`SvIQM81!;qli4nT_|B+a!|mY6wMosCzQ&H?X7hE>@>b8hY z#u(){$K9OYp1r-ixcZ>(`MUh^_CK%7?yEXJDZXYW599lfH^Y*P=-Nm9_AdC@1wa40 zgP)B5e{*qq{_djdyPNEz+W$W~eEBk)|35xHIobLD+j#cir|Szi8(zY>zR7(|8T>Vw zaPSlMd~l|SiB{{w6vL;==D)m{3K8-_ug4wgcBdhB@k~xyhm%_)VkB_!=k8Sa-adGE zhNYd7{XBSB2)IVhJz_3g1Ol^=5stywGLT-*m@=P(prDQrb}^{y{6-#De#l}~O7fA! zj|(L5IP#?P6EE-L-BwFENOuJ^Z%Sfp?S( zyM-&wI~hd^G9ZTI9k$qQb>Id&5~Yu$C=k&B4BuW158w<3;{8y1zc)W!yak8)VI;6p zcMNEa%&iVYX&Ag&M(e}*ZV*? zjIiu7;@CX^DyEo0XpevzMT4a}{}+5TSHm_x03JD0Ks+Z8@P4Si8)GCQhULh1w4A0Y zRkDex!5H#Zp=ui9Tb-GW-`Rv_JC<(OK^*ld434i)Z&vMB`fRtxiA(h=C0Up6BW>tZ zS0vy9dHS+TJ$c8}^XP*d0iouMCoQGAUeSramOqDxAs-9OIMA_vEfs(Qs)aFLUOv;F zU#s;yT*n_l_W2#oF?SeI*YxM!F7Ze}y5Gf5t=~K4Pu07no&rF>w|<9Au`i9Pey7xH zTI^JadS?RtyKWuLVpvFN>sWpBT6?=?N!)#0L^b5-J|6gTbpySkpg z+jP&_ON&!IqexI63F4&Yot!{qcs9K3y^B0SqZI^4K59Ut~n2?7CvCTH7PJKgEGC#=XHfx7*pDQavvg1H~UCzdTCM$ra`WAH1a# z@Jgz9o4QF)7iqj)x|CmK!ikeEdMvffP)!fG*C&- zMdQT9(r78#x^PY(0*@kh0qYY2j{H!fLOlEkbas`;!Lf3NC4M9Mhff0HM}Fdn+OCG2 zS4cW}4(42X>sA(5P(Xp1bvEH-Tr>$K7vdhet=8S$T}#dEK?@*OD+6f%-^eJFe`}i; z@f?>gL-Y5)wH1KYhX;)A)y3Axb&0<0*+yTTs#7pFk}58}T-L zE0jwX4`QqJfyPF}8d|j_PX1HrzdL{4NyqMYgI4R*b=4e|gQB8>fhIrsw?rPSk%f5R zt>3yv2$Vyb8g~nh-nF4wb7|qKiibj@Y&w@S&k|rhJJpaMO^ zj=G24eyjC89#e)BDblYAUx1S_v`9sD3XuR~SJ61aW|q2LkQ3q3mux|S7<}`MltCGO zYyDgvvQnm^2DrgK6&TKFI0wDwCJ7m_3sU|ipLFfzz+j~1E5M%xaDhYq%ZojYPlq{^ zj&w%$VWf#nXwpuU=8a^IYz{YPKf+}&rqZMku7Ii88+daFurwEl&E*_^EQ-2mb_F!R 
z2M{vq;3Pw15xSt$VJ?@J@N9~LiFRm|BBX(kjY(j>T~r4jR44McjH^XBB*WfS?vYQW zo~6F8g}QC&!ge>Kc@<&6vFOBocRKNg>P{!AS-!Cqvr?b>8gPZsf6x&`Jk}&5EJs*& zj{=t;m=uzl1U_}ic&=KtOD)l2(d#sfh zdNB4$T5oe`OP#-BTotMJdof16*`b_KJ%{ zNei3mUMhf$_?gM59gk1RL`zu&`HA(SBc`~cUhHS5qU<11O$=v4U8NqCz#f>Piw9eN z_?Tm>+)#xx&-e*F$%bBfv+?ch=+5GOGYXu}wsrdtKr?n*!pf@|aDoT)k zM#^1G5HRlCsizJQS7m?%H7usom2Nmlrwbg!G1nfZFQ=(-*pO$}O_ZNX<&RD(aXceP zvq#c@2WM|DP3Y~-yD$u~BUL*_&UE1|Tk7ww)8SZ1&HhID+Mf=RKYzIc#3oT3&fVF5 z|DCdE9Bca_(7ke_9PHx2(SA7KKrkZ73LZN`@4^q10sgGQV_O(D5v2Tg3qbQIo#m1y z_ziM>5)?Lx1JgI3%@a#Jd{n$ID10e*by1>fwJy{lDB#Wm#qYz67c^4ug<^55vk)cQ99N)kgX|;}-ZRWJgc=!jtdD;1(QAGy0_?d8YVTIThsGCxYSWHYD`+7drva=Ny6(rbEcRJ zGi%Q6uSzl6*OB9ao|{wbh2zLGp86R1#3P95ar;y-8V(H{914rz^tuQgrw{A zBIc^AaT*aihB=Lx?mVtCGU`T-;0Hhg$BU9sdO>Q})ZctR3W%7uTGu*@r!svgm;>QF z!u$Y!jWE|0V%fE>R;ol5=7P(fRkd@9W-2UaOQ+>@Kl|{))xmXCNOMdq34V=;(y0|> z9{|S~J}tp&yN~@Y{M}H6JWk;slmsAq8hXgNS8lRmnM%M)C8C61qA{l4JqO35++-?< z6j>_OO&9)#K?YPnBXu&=-JY(mnMrzg0C#)JXEy%x-GNSo?=1xNt_wpl2_Omq3HZb% zItz*{O4BcT`wEgrCnQiGnm4+cLwzfIl6lihslAI2Vx8CD#mAr4dyReq`DPe~_S-*n-f^b%Yw z5b?BuJ$rxk;pX!Dj~_0te(d`0Iz~1FN^{{Yad5z~iRV=!4ol}HVh#pQ$KpXcN1kqF zhgYW|a;7-`Ugbo<%r+&bZ!ga-u7(%uAZ$QrIW|n`{&|;(Wn69)hH8DEk2kmmtnU1W z{q_Pylrtus}7 zx-q4v^Q7{$*vA7cFySi=J`f*sf&6d)LF9S%K{m;Oh~3PQcY2$Sgfh{mIA!0P%{~LY zKSHDR)AfbqhN@P`Uqdfq$V+$B0^kk}adMVa3zE8)0TwU&pT!$9_TWNBMR766B%sZ? 
zraiMtNJUsfU3?mQXwtQ|^I&%Xf<-ui_dd{yRaH@FuqZUe=dt>T-RrcJ^t@uF8yCG| z(E8^TZ0=04j|PivBhtG&y}tbUc$j^o_K<~`36k(Jer!?5?4NKK%Lxb2mXHnVssM>K z!Uj?78hmtKO|1+F^@5`$YmEYv$>_E-J&Cy-CN7wC*XUL|SD8{?J8Y&l-CNs3c(JyQm=$Pt(kE{LPTLZs$*EV*&4ss#$#VRyv^aNiVXFNR-(SNs|=hUU4q0TvIG-7kZA{K+%GK8i?FQTq7t@;pwj zm!esT%ELpr)Cm9WZ`aRYh~u?T1#mlxe+56sBV4F>w0>&U_RvxKs8#a0E~ctakmKM` zVD~C#P}}X49aaBKs;7JapMGg+D}>#j6C3*Jm#22CzL;m#lG`$URQvyj{lmkf%=r&T z@_#%3e+$nhJ?>@>ckvn9#ol+l^PTp+Zx*o673?BMychw!34ThMxI)$z+>ZLfC9O)Y z{R?&4)x!U@*Iw$hmud0E&wHOci;7eyUI*8Sg8P6z1f4O#p374NLE2wT*^X(53r924 zEMs|N!zQNu(P}-;%OZidk+7xiva8izLF(zl% zKRyB?!vKXVAR|9}ER0lzzcE6(d5p)xz0fFC4fhf_y`wH}jFF{XKz_JMwj>KW8)LIr z0!qiAuOV0up~NLG1=F!6Ty3MXu$=NviuQ)h5`xpV^XN;lo%w{<8m4n+IqQfrpYAMY zF_@j@EJI{xId_(GIfT9#%URYu&aCAUYwPFLO1o_BYVKd;v%=nlrTeJM|MgF@`M<-X zSG)Tkw(@L0|Cbf5&1C-c=@)7vUs294+i5o22X?w|HX(o7Y#*d!mRNnqdi)t={W2rj zJZjNnti>i(bQ_yJpS|QDCh0lx?xTk*sy104s}$BID}t1q_fpVSZ?|mF`8zXv6_?b) z35$*Oky1g6co&(uo39>)q4^*lI<=ofuK*72;~lny*JD;0qHQSDDjU%Px)vkKQxrCJ zPfOb=&9jVuydECq4a`Cn^%OMA9cNmKq@MKF(EHdQVWyU&HyisW69|pj`dv-l3c<=MXfmzS>1xPe1ES61W=<~vp`%BsNp&s+t5#!g>u9GhD` zp2>dd6UTR^FY`Qugm#HPQ-DK~(Vk`Vnv2si32%~)U2lc&hEklIIX|zSvIEgeArYbG(mXfWa!dJuaC2Vw*-$fVg{G_P0_1ju@vL7cm!2TObaIcF7(=&qn$Y_ zWXR4O6)<3Dj&|l~2|SiCN9C=aUl!zl?^BrnNypo_sleIs5a;If^@e@ap8xkcTmL&g zdbP{{Y~|^6IvYK8FUj;23Qbv}M_cHyT*$IlgLA?s7YP2D3dBmd%C55ZlN|_pso&BB4dJi>MlCvlRy->n0fQG z0~WjEe9i^-A>x?B!xRVhQUBI%iGaD8(f2>^k$ zGBGhR<_x|9;MfLlZeUv6TC^~Y#U76(-mOwYx85m&8IzbgQ?ONLAX_#n`ko4xCS@%9zA4^yn}(?%Ko>C0F$2*#0!m@*no^tEP1!ZoFl=gN>SzNAD+3K7}u%p)2F zZW@9V1M^ikzCr-I=Ei6>i0vCJ5DD5cn9RUypQ|t1-9;lGqrgaZ9$QVMzF$pn2&Z#S z9OPx7rdCq|4Xg>0ofZDA zZorGq=xl`DLP@7_T-+&oT@kgFE*DIZXc+=U=sliyG?hLI^%V=5L5cNxro+tTtcih0 zuS$;Z?y+Vz4biXq{-eby;YIApgs=IH*PV>QvtC$Y*D8gY%U`9kPEYN7J%?v`|6eP& zWdeYj{lAB=ULR-u|Cc-ee>+d7(}8US0I}3;Jp!=&pRyQ0H>rsN3;~s4K+@NC0|Dc1 zEKHF;vBVJ(@}NzFj!XD`JFT`YZEP$V(MQvk`b3-XvcG;jH(Qr4SmLyrdp%Wlc@mI+ 
zCa`&Yub953Ih$Dz)CGHVi*y9{jkc7z=#C;0#0>j>I`{gNhMP~4ap!-i_pM6!R%d!EQ@o4Z?v=g#;4#y?I(rv!%)U+%W$Ahx%HgP=FLpV^(%y7xVugi`beVEmaB18chGr@(Pp2Jb3x?To zo5jaWDTyPqVpvB+s62aN1T3GjvQ{9?1LPA2dEOiZy2pXa%XPPtW6HR;DlS!_`lK9tDPhJp-$#oGEZo~Z`ea=dL?PiQB~e>A%YoA555PqN7k5@6#^Dt4I)e48I63Vzf?2#*sH`q> zja4qRurPw$e9YW|9}w{h>RhGdd^u69guCX)@rZH4bBCpx~{YFW9h9QsU07Me2t$a zcYGvEwz5ER8f?W*k83)#PP~>s;gp}_JvBZ3}Jt3tLZBck#QMl{a z0@)zYND9UtwHkUqOM46z?ineP2O!U9N6#!D_n;8pJnH zw5HxZ&0zJt1y#EXY6GA`c&qO&SdSBH?ky_I}I|p=DsQ~4p$vkmEUQQ8T^&@87#TepteR{dAQmv z6P=cwr&U@C&Lv#Z3utYIl_YRI}P%1s`nYJvD2Vh< zZ1*5EAg$3pgh$$m@O<|oG+v+;&wo5TIX>Ct|F-dLCI7eW#9w*FuPW77k>lIWvj0pPp>ENC8bAH) zmi&JKOa99evX8Rhzgf6ivu-)FI(V^g*)IC!lb{vrB&AeI7swpFe&!+KH`#1h{-Mea zWoysZKc5wEKC}=oTj*Q0^>Dd>d{!9P)=*mfQBi?C-_rJk)47=qu5PY%rM9#{xpXL> z)AHoZ7<@U4lZ&iO{Opz{fALSo{~r?%tH5teCf$E<8f>DE+Vda#uhahj;mPsN|KHB@ zNs0Y$$rNcj(*LGx-X1`kaEFuj!Qv&yGt7ukFCP#KNHDQqTbe|+c!z=rc~YG)p*OgZ z;o0y~T^hrI1~8&jsPo=ngn?sKi|%3J^i;?CziwAbquoog07>>?fZOjGcBt>;z(p}4 zzCuJ_DDlU^BT{^WLk~IF$AP$&+L=b`WQ4;$aO}{)RjaQO{LEEAW5flP|05oqVJ1Jg z?7f%9!H+yaLL{b=S?d3?QN%D`lWQ!GXEg^8`&;1NI}#8-^1-7M5`fLWlQRA(hfYb= znjBGmHsr<07J1P~iWCCAaY{JNy z0>@khvSh4Rno8G7PbLq;+mBaYugjnqk!r@A93E{^RDO+cgl}CON_{bgqmx&q{Ly;Y zaH%IX%bNVK`mScl(aEcA2}=<9BdtyipcT|7pSC%SMswie?q zAgR3x7^|;jJn1yxo&t7t_D*#h1#(yqAoMWe7@R2zCK!AY+?qaaL(Fbf44Oa;9A5!a z%PJUFwhsbNC_k1s7)ZbcMsE4MW-)YLGo_mhQevLU2FhVX5C?e`gB3FBMved^bU_6d z42Wc^pys&Qgc(Wi)sksCs3!Nn+ZyJ58&gbZHMz3$ zZ4oNP|JI$}DuP|C#8j{T-z3db#M}qO@T~z|H{6FH@!8OD*Q;pa``9His+JDi2s!tX zNR_nQ91mj9)wCz5Y)b%=4!yae9!QU>l|BpwT%7`)@@(ZyXIQYgY(v%RKlz>?=#1~1 z!}VCi#Vw(?va?&^*G!W}e%>Znq^xIN7?iy}JW1TXgV@o#mHpjnF6lft9m8>d9nPeH zR5Dpc~belIj?4o!UrY)Zd=9nBl;stcj7lBL2w3%Q8X3 zucKbW-X7I@$*!9HfEpV^;&u3k9d^p&1^rMt-}}>&u%5Cddp(4og417etEL|Unc*nvIna2KVN#8N9j|u z{^O*7n9cvbeD!j7{=-%t4~?*=>SKg<848>!W?cs}LB<5TYU8K~Fl!}(F=%yQh8RI3 z4-bI&Xo9aJ&r4QeDS!iLU#8zkNjEXId^x_N;#ybt!9nD=0B9)UMHK*@d}H^@VD1ZZ^VdN1eA*gqw@= zuc(X9sIExrm4-3>=ehx)NKhXM;;7&pd0tYvREdb*yClbZjhy=!7tN3nr5;9s&AKu2 
zIL^_zUS~@YuJRdOXNtm)Ty1iT3D$X)v!70{LB0u)j~A3ACA_3YS3-$i+c{TDAL1q3 zV1Y)FnBqWM1Eotd*vEOTYQ(P*=MR**#mL8R7s!W;_C5)2JseELbO47(@(1*J@x!Yn z2#kSljzESdgbQX*!W;8b!9Xk~3*kb4fd(Ys?RchVhVxrqX^kyjGbd;$R!$_=__d2t)$Rk`JyKoe>wa668Zh ze9TY*_SgE&n&=xS@B#%c9ABT_>~EX|YKwy*#(7Sr5R9`a0SQc}SPTt}f_A)qobON6IJ?+@2 z(nOs@6E{--aPcUbB>xf&IXL;(17bn)56l=XaB+s%tdFwvuT%}3xY$7~jc6_9PwkHG bp53#1cF*qF()0fV00960MT-Fc0NeopHBKp8 literal 0 HcmV?d00001 diff --git a/assets/k10/k10-4.5.1400.tgz b/assets/k10/k10-4.5.1400.tgz new file mode 100644 index 0000000000000000000000000000000000000000..a47619f5cccd6b1999da2b530f9abc3acbbae212 GIT binary patch literal 114036 zcmV)XK&`(YiwG0|00000|0w_~VMtOiV@ORlOnEsqVl!4SWK%V1T2nbTPgYhoO;>Dc zVQyr3R8em|NM&qo0POw!a@#o4D2(sFudV_=W%f7mzL6~XD~aowr;Z}aj$=z!B-u{9 z&z1)wAqf#la0t-0l1bIMjdOeFO5Va3ze!QHY-e_(t=X6)(CBV78jVJyF>f|D+cW5~ z`U2Y4-yY@HXfzsoJ3HdvMx&AZx3Raq_qXQG)^_9d>+QY9-rpL{=3b-uH_&(-bmBjs zGU)xS@!+X>XA$YXedk+~0C=rIR*nZ-;@ zT?;O14z$reIB7OAOL@pbkRnxT-fUE$i!YEzF>&_6%~l1vZt%EM->o-yDkh?ahg~M# zoj}Tv^MC*M|Dn7MZ~z$uUFg6mvXR3;--2vHJi9U@ynz{GF5TbUoQqls>pm_Xm?KQSv zC&vF)V`uB<_Lf-S@g z93aDj9s)Pe!*FaN3K#)n1YAfdG68lN0Sn@LfFawpAVaiX!8V*Cu4+6q#guuAeK0jV zIWj!t5{en&FVz?n#c2fqSr7dBYXtxXr|K`)x2!&~u(8+&E$bF8XrG(%!g z12O?@hLSKC0X{`_QS^8M9Krxa3_!i9lCK0dXwsNPO`G+#=w5%)!xSr+GWguC?=Ka)%Y_<=oo3_I?=gkJ6 z3Q$#iMh+KlGw5*uUWlGF(F8lld<7^Y9-N}K1u5-`zWA@`tN)JF;e;{*seQFmC|j>82dtvR(m)q?!{w%DYWL z1H8s7V9W>!gigT=TSs0UfR|Hu3lImiE{B{7=$Ozm0dYb|M<<nl#GhfJrUPBt=r>GQwns~$p zu7_?g@u?o@6#Un3(h}h6$wQ+4k;DQ}f2@>%&owP00W1$qkjF(^V_c7jd!3UcMiYXj zR#$T1DE(g#01Yt*A_(A>JZ{lAQHl(YPzro^>Om9HS26th7wrHLrb6eL$U=Pb;QGKr zN}Vccm&4s5L4A`Oc9;T-P!<5XonZ^92DyL=wrM8C`fOK7!HXB*fN;fg3j;L_0(%xH zb4z@1Df1AtfkBiJ2ZD*`#S1VxJ2=}1|LX?dpgA8Jv*{Ew`ig@#iSsK14&t4_`y4t8 zn|QoyHO#hd@K`|9N?8CNA_lNPjjI+#;1nSTd601uXUMVvarq(-+~5; zfnqa6k)FiWw8hyz4)iXcOkSySeAbrgQ^?PFA%S?v1YGCZq7j!jqNiRpVxVbbN9j=j z;B@MtDep+=Vj8~oxhho;dL#zV#F^m8qqlGw2_8*IXJZ0Ab3B;@FVPKhSO^~7A;V|H z3tmh-;xOcx!3#0M^}`%GT>Qi0dYJ3yK$GYfLidNw*_cuo%pQRA)8_VqPZLDh(5-pM 
zA~3|nM0Y7fYK#yotgA*)2n)G0ULwL125oW^pho6Mp$Xo%^WkJ_p#G=r{3?iO_c)~gd2 zIg}HnR>vImYlDzE<~pEs%`vnfw^KN_j_Hvxb~v6E93v}+B3kULSw-5Ur^7fOx!G*d z+#Y!WE`_m0fJp#$fWb{oZVEblxH$oT5?qO&jOc z0Wz_N3|0-AlXDuTX|4PA^4F zV$4y-YDDV*H9Erei2hKinCLD8;2%C*q`{f!4x4TW#YA_NidGM!7)g<3(ky$OBx{Ke z3S3;#*%=cg@I0-2SHg$?9TBwHFoiuQH@aty*}M9R40 z4`;~nLb;eK6~pqytWg2LB;58C=pqEsbx%Ry|4BHfP?O@iPkz3~g zGMFO;dWD^q9B3hr=?N*jGFPTUKEZYo^RSU8X{j}9dUQL)M4daAcr3Ru*QYZjOF1Q| z;XxO*TjmX>+@uKJBkx9oeF4so4}{@5^@#6cQ`k?+YS+ChZ8c$(=d2oqg2Zgm=GMv` z(@P1@cUttE@)>+@cXxXy`npG)0U;~`M145oh{P*Q(l0&4kRui5cEeP*jRzT;E`rvH z#Ut%cmL$x_lB^eu~P8C`xq(n~ejuhNQw6rB%F_W@t;Z-!+y zm6eAk_%7?al8(^Dqiy4q@Ml?2+47%NGA-X(rBWiERVXUWN%zkeh!n!9!2zepVF$gT zaxO`tMaescJO^5R;<0`3rqO5!)sgWsTq3kwEWb1xTf6*|!n(zm6spdf*ROdy+#VH- zfw#0JoB}6kajdOscuS?p8hi>r97B6RENx6r37J}`<>Ii6oZ;KiMM(6OoRcK9ufB&y z$bk+Eo96G@7Dn0cVwSXcKR}fDo`KZR3_lG0aj>s(e2`V8JM}t_=@RV3Ahr&&-|A{{ zU~P-|X2-b+-@15Y(YD88&mn*C*dCGzJB176#Tb|Pz!jGCJBqJ2)a3?U0yXtEyKEm) zdP_XX#!A$k!)G;*uYX1BeA@n}oVROw-kzhyzbZmCP~S!#?+aec^$0gK_Ww)$agELY zmU1ybQ@LrQkITfX*19FrIi^(BQ4b20~BdwtFR&#;l7zlJ!qspSAoKg=PV&c<{2VDbR zDu37S(%t&Iz{K_N4P>aMW^e7dr5mDG%^747=Jw%Y8uDl$J}=?1Y`)FSO+zd%xZ!K= zgXWt?1H8}(%TmAyit$MZ6O%7&C^Cp6EFD>@@6(xvvb~o9h3nVM`g}qla$JEIfFdTH zV<*jq{3IGaJ_syH-K`bvzZf9b!Um-KV7p?G=_$HF);_?_gj6b8!5)R0J=uLPK+9Q# z?pM|0(Isr=f>te>d8bOdHoku5=(qaEAebj7f0)jGQd$6bF_c@I$P_a_mCNbf?V1f; z7kRIODWYaN(#6cQ6)#X9^nxb2khj`hFdUw0_nZNla!p|;11<2_e^p)pPys z#WQ0r*Jh@$DNKOwFcpJDmJ?NUkwgW#0?}^&TWnc@SB~>XpduVa|dmgE90u`kqNet+)1wnjyG2=Ek`q?o`qm3ZzhL|sBkP} zHWP@($f^gfAmOm*>fstn9VtEEhMe0EWFu;(KLAsFgB-p(OhFX*%XkM}TU!Ak1o@^c z6kko3Z3|*sgMrwNdTiBxf-V}X*B6$ntSJAMJ83gWqaXqf>3V>S#rn{cOPkmygCn4v zh{SgzsP)v&Cj^F18L@>s@Av?;iR17^zo;qblRJnDJ}x+135kY4qj*6jJkT=E#=Kyy*Ir0qhu- zZ=&cRdlel*uaxM&3WRJ^&EaB#)u`4p6ih`rl1mQjtg*|rgZ3*HXtkKpAb1u`D{418 zDt%{M*(hpqw^d&04CTHyG+3gKUnqzudvx-ELui%d*U6hlX&HdZCwwUdMOqAE;|1{*mWT@X2_%3 zCgQ)E_#)uzSK5RU?{aYbD-DZzvmu-goSBmL@SRww%a;`*R_D-a6N_+*)Kq#wxF*uu zY*_=XVnA-v2K$5ik#L=G?&QNMC{n3F7t=^fCUPyZ(5^BWMI;AotADIeQZ9BiTdIv; 
zU9nQZ4*z<}Z3j`}-u{IE^wwG@jjmAc}snREATbL)Z zNgzF1sjsbYxul9rk%K&JNZ;kqfi9g9CY==0-$Heik+3`icb<)G;gt`@o8kIlpb55d ziK3OT4bGxUR49T?@IYbq?TOw0ycW1PM;w%j zhykL+5_{TweOM73k=>rjamrgweryw0?53u_3=n0+3%(f?Cjn`jDgM3^K~zsKau)>+ zho^Fq2|0)ETtZRAe<>ZRLBN4nN*Z(D9bp?0pY4O4T`4&nx!~oV;HHd^iI?Q#A$YST zD48K+K0q#szorZ_8h?c*v`;7-BpMTNU1AOpG#8f;v*1f}w^D((RJlfZ|5D0XNi1}r zHfEKKq6vK`=(^xbDW&u0&BhIXzUvqH;KN<0KxajF6I|+c z61o^UBHDx#BAUTA^cKR$_#*PemxjUmTj0;ll&~&W*wV=(YJfSC8+4p^_B?P@3u}t{ z9-_j*(e9ok084n%0!jfBtQ>jJp_rYZMKAQ8#=iyr>dK9M2|eMQXOL>C>0;Su6;VeH zzl3cAO;hXxgX)YicYkwJ7JakTXjGpd0R89hS$~oEe`?H-4eMVhan{>LVf=ryvE6Jo z67m1rTd#M1#{WOX@0-vl)n8=iS3@np#h|7{f_T$S(}NRMYrNi6C11&kY~dnaOPYB4 zt?LmNc?=_3-3Q---Q#N9!4UhF4=wnb12yPr91 z@A9ujoKwd-eljleO9A7)knsw`_9xdQ4(iO#PsWx1Qf_w6Ii|MeRaETU+L~eGk_fNI zw1_*spJ@K|pURS1S*t&&G2v-`qPD~_!>4I0y6^Ht2B_K$dYabsam%M^ZMFIGAuZ{B zo2O}Q)>R`*&LJxL!hv!As>+}r|n5-9W<_VOwHz5zbtQYK(Eu0kW z^;q3Y{J*JIALL*w-112^^_Z^HoWnuu?NPhPuXpclqVy@9o^ z!UpIASm2FeI>au8!jI~>cmUzi1V#UuU7t+8^qLF8`J+h~(D#&*VTPZ`gk( z1LB$g*1TYrhuQwZj4j&UjUO0&?wG9LsOsv0S-BqJEa}Tt#({&1+wN4T%og+!1@~meMvN8fcQS4(P zxA4=nsxlQnRfwBPy6|H-tP-h~pDfP1jIU(1wpYa=C|}Zvl)|RGx_E>^TH4i;Li;BZ z10+hhDN-o^WMF_q881f)5#=5c-TP6D`cJN+fHGyLS;0}laTs&B=W25a9oj$Mzi_W0q%EemUh|0r?K(K{d33psjE#N$CO zOj~e2PDd}Ycm&Y;K8!~LE#>eLz{hUqcm(+IOFkZvfXH1TkI+hZ<;bG|4{t76GkAT~ z$zy;rD|Wr1oB_ZgLiskE3{(W|CO507deJ{~%#O zrLuSY+jF68jL*7bvrEVFte{kiw)0BHr#z1e3lrzBdzdGZ@IS7#t8)9Vn}+Or0@m#( zOy4)~m@Vjg3)b95^!|f2gHGL}@W`IjM;Z`$mpwf40z}&tmzd2?h>Hsj7Ny#WFJ#0; z>+9yqqh!?nYae`mz?^CnM%vuVYq9=Q`RZQ&$DI)}Rrk7uH{Zf8*Ff2ZA`QwbfS!A| zRNg1_h#_4ig3Aoo|7*pR$t#(DcHNKKbq_VG#b1j2s=V?(MrD4-7Ik9b3{yO-B)X!i z@7ga0mBr^gDic@Ft%KZLzx%xqOk8rk4g?DpD;R=<>8@;Gq((Z)1OYDG#o6|BH0eisb1V>Hi|suc5AkJ>f@{yP)eIwH3Y7 z(QZ9r_xltLV-oPFtv$sO;oot_egb-ecGgDFiu%+tVfOa^Uq+RBh%)H0enE=shd1&x z8m@4=>cP;pX6upRup|-p;+1Y^ILEHsC4SuYME8$QvpvJ>X=02W^B~*HmTKTWUBlt` zFCQCzi46G0Hl6|4-1Y2cJAg{e$e_5>m zqqU697GuvqF|r}Ho~LQf=;vwK_0R-)9y0aWjz>Zx_iN7fVv$kvQwcm(yIcu$Av0#k 
zE4hunY~v+Wp?D)E7yg=dy`OJfu4zF0T!8&tfaM@koi`iels#ANOSaOVXP;CfC?dgs z3=Avrzk+}cd2DbU+t((sA$D>{>}p%+FuL zwT(uj(b(JB5&t$CjpV;OJ3Gz4HFvhQ8?Rq)?=|-R)@W{TH=2I~jkRKt^Yi)Y`frT~ zx0NLBFY+7Bpfd*x;)4nC0Cp&YA_*usfs`TV|Nig)L%~V20S+L8pexd$*~np_Z$UO8 zp8fx;R1S#Y^LHXx3K-AeYK=ejyoK}BnGW_sC@AYURBk=wE?}|<#QV~*bv1M@y46?8S$o9eSyjP?=5f8kCjv0Y3a`V)KbP+jU zPS9c__>>4;tjD6&!1wPo=#Kr(&F0qYdZXT`H}^Shn@P6t9)GdVBIz1Ar>?n^zhUje}g08_*Oy)l52-5R=MVb$xkX_8X_hrCgeH(9G zTt>?^yLl>a2m%`byc7x}RW|2FQm`SKIc{~qfLI7fVKss2^ICddku!vYkjZt+4BjB% zBG1N@3PsCikO9aUAUckbiiwa*wdX5ym+#Zik$370D3JFH%*{6084 z?OgX--Og{nyo?EnEUHR^w0cwfq*}y5%_$O|FO)bgWV7F@-+13Z6Eawjge?E_S2U;p zQ-?PEN*s7g|EF#cd($HpszJ~B^J9(EY`46PLJE40sr>jzgH>)G(jHc z>F+sqfv`i_rV^MW0`s9<4!VaXUjE%6ng*G240}@x{)x7U^U@-PF*W)}pdV zf3NWA-X^|VoJ|2KEGcNz)%f48x- z_0#@;ir=?yoBseUpAm5+kL&DLz$Bn|HUc&=@nix1u_>mK8W6I9gukQ?O;bn-w1zqn z1*m=hKJkdd__)#MS^=-fU(_;yc!;uCbzz7D=(8CZTV!nE2{IOjg;W}5U_b^fG6jaW za2Wy7Obr0tAcs+)^H+c`WK4lYrc*xu=;;ek#;1gvu_w+T%Q*$QxDZVmLovup30*9f z6%#xytgdYCIH8=x82T%s#L2J_rCIEYV_3k=5?s6wzYHQ4LrIc3a@=;pv?DpP4<^u} zr~<@tU?03`G@AUu!#4C5`?Bpy7U~Hw%=7c1NGdCmuq73@GAgEMU#0OVZ!4?t?39qX z?+V6AJoYQK%wDY2RG(F%ZSm46#4CcK{FCU5LVP0WSNq4R1J!aEx-JDbVr^PX5g!>r zZTVPK!S%=lTS{8=LtzDOW?)@GJrwI3-b>gCu?pT4RWmVVK@I*;$T-QRB-e_njG;02 z-IgpUD#s|`KAOCv8qudVi3w9A+le@u@J9Hp%JYd$0G`( z3?kT0aznl;fd;xuR+Vp7tr{gD2B91y8BX$%zbV5;--*@pozimHmOinxBr9jxJprZV zIu36I$;!Y`eJPJ(Ca4T?TOCOQItH?$$jLV)km5=?)H$wOhJrr94vV2IyprXsW)LkQ zlpDC|H@65h!{kA{-fjsP zC(XtIWN;|@9D2~Qkag~ISOUbG6+s+{)sH2zojI%xjTsu^uc-S@Kx^C&T30z{lHhh5 zs{&NQorB)J;XN3^cS0S%^CuJJ9byZ;N6_>Lu@9k0#mQ?o>&!S7d%E_4maDXht(7oG#Pslu4d25xn+cRX$2goJ&hA?E1(RA^w>gmCHd3J92+%&81pp&|mql=!1Ib!(Yd}TcavEu- zaNXgpD7TiboOsMbxRC<(S`ZEb=Ua`+gx0d2OmGy(Cj-cvh6IvJLQW02zz=AMJ>|gK z&E&cCcLac9PkBgX0*X3BG=QvkIY8fJ_LX$D=&JFj96;MiK)Zrr+Q$~aN<5c`Ci;sJ zV$oH2wAIBG?XT5sCZEf~60G?`#sMp$f9DaB5~OV*zoGQX>dEJoWI#E9b=|~cSzRE= z(>`QfIr+SjViB%5T{HHu5||>*ZTl9(YL$8+O>{Z$6fO$vF{Bil#5=a(6pi3i)~RpR z3+w4l8gVI%Rc+K?zo{>o>h5ASZno-MJH=JJSt}3GW*pJy-e39Ae~JCS8M0jDt#R+~ 
zZ%g<8c3(I5UMKedcD5R?fA0T1#ZT=23F{W%2@olxkN(Fw~>I!mC#T zvY5xUpb0!e7-+YEfjovM*nkWH4_P8oB}kJ8C}IpdQ(6%LDr1Bk_0ji`SxIq2U{_D( z+6&ZHgd>SEGSowti!ejK5}{1-?_$R+ED%wTd8E&f>`&sV?HdmPY`c~S<3y&4ojgEy zd6b#xPGoS3QbfFXHwT#uf-Z@DwoK>-Uy2CMqen?7Cx$rI0yF~|r7kk?1ZRmUC(J^v zgq-pczsMk*{ojCz4F;+*M+JDv5o&CN#dss9n2M4X|96RO$36#{y|O>+%vd>J@4-+H zI#UFG!LPtCH_+lZhXe#sgW{8X`o-X7plLQ0L+X+(fASHad8Bz00GP6P=_XLkPaw8H zwacvz*HS{q7A~UT7I_FzaX$+(>)?#{AaRiRs^f(fG4*OZa4q^*E%|5d`}dWEqY_tk zEmbO|MYI5MFdc>0PYGI&1ZDRoo&fYm97ZFU5~N`1-UM+z=_(m-}z@NZFt_2MQs{d~lRIf!RLD#LvS-{zy0WhPah~jgM1d{pX zrGv(-jTgCOaq)Hx%Qu4v2HPf1J$lg-s3*Ce5%-X;Rg)-XQRYcJvdJiHNO=ec?^^9> zJ90Iq!zopvnm2|qbLT=804)TcNc%zbb3H9N;$VJseI^eqLCcc5CU=2zn4B|^Kf8K&Uncp-Rj&>QlyB8JRG z#zsADE^(ydEcr!1=aWQP-o%Wf38?;^*8a}LE+!cg?7i8T?A}BaGbi1rC{15HP`MYQ zR0Wr3hn9PD#N^NWEWojWWg#DQP|;G z5*+FJLd2JLU!S8!BgO*d%V}f+;sh;-LoD3vP4}AV(CD3VSq4P@k(gS{j;eaZXJ~i3 zW<%FSUKP9yMYUQCjpHVca)jrDW0puh3K>>O94TG3W^(dgu8dK%ejQrw3?`=d8KKNs z2f{^tOl-tv$ftlB9^}gx833$ZXu?Y|Z#MK`rRk1Vv5pGPP=JX{8Dcl>+9e)~P8@IE zynYSpyi1a&;*uBH3rIEag^aI5X3U!n8X7vnckN+=Ub~RZ_Q9rl41GAfzv8=a)ie-l z%n)xtoaCqgz$|(#*&#jGI!)F!on!aY$x(-xg9VU4H z%Of$KgU*l-#|&S{#od08wILFSCe=diY}HX1i9HF%2$#_rEWI?WOXp&%LW@keV~z-6{v7t zL>B(4%*9pST;Y^ zvln@h4hrY*M6Cxb_)3V87DIi~G;{}vbDHsj$XiaR)&orNhynOdQ)>BO;-M0;wHTy3eCU?p%4?=B|xPb!x<-$6ce9s!6@C; zc$2y=w;MU7S@0zb?gL%cOfEYjv_Oy%H+OJQ825R9E@-fVNw^)UhR;pRYA30%{pqF+KA$H1Lx8bTlola-E{}B z4W~$))%Gd!z%AsfBt|Og9IoDs4WH7Fr2r-@x0Fm^UJvJPs1D_m++Z>BVi7h=uU(j)Hh;ckb+$WFQ+KPL{TI1@NX<% zZp2u~JB)-&N`pA^2qvxI3BY{iiM=Ub@R?$ib9v^|k7^QnsG{Y(n2nO;3~8ELWDKpk z?wYplw`18tZiN6ZaZ-q4Mx>Dz=hP!4~fggaX zzena3-B)R{B3}Z~sc%xAvKPK31d+k%e~hc?;U0z=EgSPI z!dRocNg5u6e0i)(2~8MJreuf9O~jC`+#;ph!L2lWBPpon%?2+T>||!k3$(|@>R&PN z9>-=ns5O!BdZmd= zci0rM%)VOPZn*c1@^`u)sD%;`Ho{VQy_d8~azgRz8}a3Q&IGj|!z`msJx>*5_Sh6; ztLd`VR$N(Bv*8Ar^P+0zNS>QpL1D_1c4?iN)QGZN%f%1Zg~#u@V0a3ki@_W%Vu6r( zSmLowtx}2|kL@9uuv54|p2BuX5q+n_1F33>vx=)sRlGtbO)s-1EM9tq9O$sD`HFvS z%gaa2jiS$TT6-X^7u21h)u_FZRZ7#V%6C*Q+Pv?&^U^e{0)0s7E%ELb*Bzl)c-dE> 
zmsiLiR^oF#yqDks=uzo4*Zy=BV@wefaikXUO@(*yKvMwfGAv!xOQ2sisMvgdp=hu|3I%FjDoKePBcPpEB?MzxF{@v|G*gedsz`hO{6! zufQ)iLPV0yss3=l_wQm)Tu5T=$%PDsN*?F>PnLbZh5xVHlk7JHJy44Ys}cyz>r zWLbrHa@0|PnLNgGOYKNhpnMaLJIkhbikNCUbI|979mdCg_4D?{SM%sCyZZFu?DJ*Q zI>YZ~=608jFAo=R>!M*C-OWC4ou6DC9X77cFZ`ud>mD4QcMn>7 z>Oa4G(7L_EhAe;4YkdCnp>c*=PWMae?yTLo<^OdrKHhc@i2Sd;bK5<52S41p_SNNK z!~FEYGQN_NyX@X|4=nrY z;GBKFJZ^kmG#Z~royO_q2mE>KBkPT(+uft{tMC>WwbnTVLSl0~y+fF8n&!9=-40j^=M|W7IoD^Urr5o9-W{Uk4w$=T{e3 z*1J!`#)r=6z3X4uvn}-L@bK*D{HA+wb=Nz3+waW>{_*RF z9cWn=JaV^<%iFu&``*cD`)YB~8Jt{w>K$HQn2YYk;oa5I#nHu=ivx43`QG?Cdp~Gg zeC-|eMi=wl4m=hooEz;4`LT)O9H9d!|Cv&Pii%)7?5>jO+^s+E;sMbm4c;ub8>Db9Z^(v&QG<4s>RRa84WMmv`HzTh<@L zBkOi_^zrNE#k-5E^V#l&{noyEKOpAOMdPzQ8=fD1cz5AkjG#65K3bMDm|M>9^6l-p zHFG~Z?g)0eH{&*KpwVIH^H*arI=K4OyX-d5`Ca$&@Lh9warp6k`|!*8aiYnemiu(NY@e)yqx{`T_Yoc(e2$@Q2yS?K-1 zGVI;ixO3-z-n#IOt#di|C{* z`19x;J{?W(x?kSmt1rg()o5m)eI(Q4BW9uVD|c+0mf^hPW4xj0-s^suH+l!Ht+Rvq zUH5C}?reVe>3s3F_xaNY@9NXwc6@aB9bH&gU%ITj^?A{4HyYj12kUe+{d#tA%(`}`*%M`q=Go}bDx)LXxHTQy zZ|7H^x_ifM}`SyJ4!ujwSb?(kao#ue+m)@cKZutJf z`)nWf%}opaDLS}Zw@Ze zm(RP#)`xeaPrbwLx!JfFnV(J;?09rEZKOCDQ>;35PWAoC!8ubni&rch(cik_&t1HK%qx0RJ?$M!jvc+~sM^`sjpL+hu z_TkCLucl>g_4=1zW;>|!c4vGz_%PTW_?HJ)JLl)S2c!8e{`_S&?Ap7Z#UfYW5C4 zIv1VG&fTH8{dxOjYhVp+<7V7&yYTXD=i+_m=JLGfcN><2?6((#t=_16WW4QvB)?VD zfdS#D4gz~$)(^3TX#EH=P}Q?`xQtM5HcD9G+VJpH5*pxBI7MKJZ;&I>yPNu)mN>>M zlY&c!U#1nI;nY>8g@^^NKOIOS^wGVzem?m2VdirG`fPPyizsl%z8P4!n2x5mBwrXL{4NU0P>ZC3WenVEP<0CA#D;z zP>KaMYOpkkRHBkXV`<$uA}bKMB4iLdwVtMT^B>@N0vwVQTWyYm=v72HL>-Tj;ho|1&B{_2ij3AQ+$yq_Blsi= zyenQ0G`7(b3#)=IJb1A(P{Z|8$hQ0E55trLT??8rhgC6CPi3B~RS=DXT_>hF?7TmY z#jFgS*4djLePXtK%7`63F`A$(r)-pr#HSb?dDYi|^s@6&MHMp9k9;qtAquDrA)EiG z$PDf(1WKGN7d|I_Pt3?Daf$A@ zEK~fRgM?;}j|dboiry{y#Ebze9NEdmM0$x38!}PSLY=6^KNFQi6(tXG@W1h;lmS`t zy&fOS0B>Y;d>qoB;0wV{xZ_aNy$mum@vTAR#+YOe$I9Fo@_SUVtR@EuR=<>6GBVU& z_E0rZxf+B1XS}!b1T?YtvJo{kqN@?LgY6=Z@~O!&P&}hnT4x3%1dK>f71afpMp(YqnZqNAwEN~%s~1e;J0r;?Ywp5hL0>1WXvHR 
z5S6P5`V$xxW0PnwzkKB8QxN%uj;JN|5)H*qtfpYaj(Wwbrio0ZrYOVNyyEUEG)AgK z!LO6P`@+ig@Fn5^kRvkwV&Z@~T9k|hP!`FFy8R$*| zdXa;L(MkmfrrV0=n#e!Nm`po1_22|LFqL#nMcutD1m=py&j+U|aADcLE~7g}X{*Sg zQLYV-WJM5yq%-1MCJ-r`!rMcIM5;5yWCDtC;$RE#NonvE1f-|~pQR^K2UPPrBsxe` zUyS01c;(q%1&IRNr)=eJNFkKn8#3F={X0UBR1uHZ_bp5AY|`kAyZH}rp^B+USl72K zpkheGbPNpPPyh$Sm?KZ855b`;Sh`mu^JP}>mMLn>9oE8+VycsU@c(FdF_R!63NJGP zU~!z{jYJ^@K_1C)L&j{VX4RyjviN1wANPfieHM&Tb#Zx^fK{bE0+=OaL^!guQ5KO| zh(qx_WH4(Xqj>(GXqRWQCx0xh&Zr2iM8OBLDl(p)j9Z0Mddfac4H<3vCcmW8WaeO~ z5`T8#LgWs$UABLCR{?NKD`uFiW4Hp z@TyryH?rWR-~Abo`zP=n{72wP#gmNE-@jLq@Al)1$E+$!wZ(D@3(c*>O>P~@h&8Rg zY9P!$v}^?w^P9dCy*5*4;e)hFzGH;>i$#(TPvz#2i7bNC1XIjr{#aah9get~L2TQ1 zFk5WSkY(3I6}maL$as^lvNzR>utrW&^h%g8RBcOxFiVj>z@EM9%*#dd@GF*tsR5x2 z$JoN$^sAqQg@YWLFFiCxcjAir>dikJwYUHJfB)xy>WM$9%K@dg++*~JjM1*^5!b^a zV~UG41{31x)m}zE4gDpokjM-!uAh&i*Xkduvb6plQc!geyCvQnJJb67jmZ0S)2wcQ zmlWAIeBjwG7l7)2{|1`%=G*#R@?=zu%k_^UnYPuv0e|!DCk7tD;ZlT)>tGQIJ39w z8%3;*;`rQY?|=T(FLD3Fyx9n2P1bb_MDhI(dySo)?|*oTUt*T31)U$U z(p>327S8gBll&I5nR10kPbLWYL3d1^P+0fIN|9SW-h8%u{vm#OIgNxI+OHIN-SZso z-?Ea@#V{|EoG!u7u%T$~xo?rYdqd@9PP^ULz{C9AnQS%+p(}1QY^)m7N!2Lk)hKJK zimg?RPBl&ZpE0qkMWE(crNZ)!NP6t55%oE5{AgP-XM`;sV9}h3^%xhzt<wgF8|A8crOws<3Do}Wo2a`uSYq^vg^fi#(|zDYkRL(J z#TB%e%6S<954~w5*I^A*Yc-jNw8q8bw@71pfJH95cd=qzH4ec*9;=zy`)!li?j~w;osvq%6~S4w!G#05xF+*Eazw-RPc+e2U7(kc!Q57qz;6Dd za;Z%|?Fc@A5kN<1&aWu7+q-^FxWN$>tL} zG9y?z9Mut?bM~qUafPv33ujZ_9Whl#?u zL-)aMW-kDdd-*8UsLV8d)7YFLXtCMXY?M;{mEU|bi)8>{2Qv(<17yKPPda}$+@mx(>USjK^R^dxTBn4bQR&d!cbJJ+p#|Ma-s8XceY zuG@poL8mu5Zk-NeG>AQzeNauO=?-oik$=^ub5x_S{)G}J6?dJDay0$G5{!@Eb+3n~ z*RB5X^-1S*8pA`6*zv9bY6Ip3Ee2?k{005bc589pUqrusE|H&!R zHp%srBU%upAER#QkE!8d5!!KlP(UgbB{b+yb&dKpQs|eZyme59;MT$OkqFIu5T!e`SXx^LL03Kp zSwZKO7u2>ze6!=+6w}o{Jv-+;cJVw)o%p=VZZgPPKCuQ#@|I1jm&5B;yWJTMxiPa| zI&WzqmACX--kZn`nw-h&a;WC42$D~Vp#&2_G*_%{=`pki#6sDOK4~_tIXTCz?)Bj8 zbe#jGmy$*7@n^x^-|c$THR=i#(YS37V$Z5-flp~*2kX| z3Xz;li5Sv=X`-OK8>l41V#%+Tv256X-x+isVJ=W;6v=}s5>5vB_^p2(u>yX 
zerq_qJR7W0XRUyrtJ02TI{^YzJA>izSxIRQZoDW)Qu**Fc$$Nu3Ia)JDs_8LTJANj z?bGw&s57`O-{osUSYejGZVde;5=_px;yQLXYK=OF=ck#TgXn43qT?*p;!f?J9du5w zhoiGW=lbAypb;gWa3F`sihZDJGF&mY;fNYuOcUn2MC&rONiM>`Y(%R`}FMS`m}S=$rwP7h^|GZ7P>)J zQfCz$1z*Dd_W5vh*1bM$z3XJT5ub+jQJf2wCd-bEmm{^+>z$2+2lZ*`EyBK7);RKf zvKjVUWmk^kf}~7j#h1Qtg4>8eK3v6JS7IYFYqmS$mmVE|s z4BzvR!-fuY+refiEynrsY;baV);bW#58t1So(t|4rO`S0!3vWI#3xzJ{%zkUo zIz8>2UZ3|l2~R7n&Bg&_aLA{zKJ=hvA?w`b1T4q-0qNscyogR0O`HUrG?znw@advv%?L3qEI%zgU1{~xaWo5m|(_uL@w9op- zogadLHgPdp5ql+IKLq|ts+LjE9v(lt;It_&D=z)P*~RfeXYlMe>!jO7hzpPKa400> z?6h-za6S-5<&&b^x*WDB3?UVEVv#!S8L1XT_dL34P zt5*8YiL4rnC9pl{oOVXfi|hfi5G#Z0`?Ir?XT|hAA@dS=w%>Q!CxcG^>{*c&C+-Hw zB_%K(j#{JPvtm1BkkJyDb`FRAv*GAR7-Sq3LRn(0rCkN$4ZyMEm(|==MLJ!p$q@Yzy2(Me*B-jmG}*^}$)U^~Cf9A>su@e1T{hImMny!iQGps57{39q|R+ z`?F!j(2Q;CDVT7I9CpwfX76$aW5j2(y4cxk@xKZY5j&CBt@F|Qa(pba#l)E?ny+z8 zau~_KWbTBhZN+f=vG5AG`ZAknbawW{-qGNyyI4lja+|CMG?2$6bQ`)$_gUMmc4siU zZlCoIkB_>oevWR7$x@$-m9g5Ei>PAkd_e{aW@c<619~|-T~Ri6&quA%aqsB*vhz;ZYMox|-DxMo zNf_MVUlP#hba*Y~;ovwURN-;gUtM3GQn^Iq8MpsVhi&As1MHPWzJE4YrUkP(ek)rX zs*FLzW5wx(QyX|;*XHf(ttB~i1|MQd>bl$Mo((=<4>}*uJHw}5LFZ@AE|_UnO?kbP z1?~R%4?uq;8TSK-PbJbWD*(c&^6cF&^JhPt{#CmExhW6Qh8KsEsmwAaq_Z)Bo;jXO zs$t+pXt#qupziBI_}B$SlzcHjtS4Xe4;3Y+C2l!a!&7t{F-zk{48|_;9eq+zAO6&LRK^<~Ly8TtUJxk6 zn~fxH5=96$jo6AJiwG0lwFHJCq>YWtJMxy}yk09Tznyy<;u#=g{T}J`*kMN}>s)vc zFU4w<5f4tId-^tSL+eSz}QK@d)4%>{D%Q=j;?DVH>P<^Wf zY%gYnY@HX*YIVKBqfxY#nOjp@{acG@xsv4299)jkXV=eYYUejeO z#Qd?E*Lk8wauFV5V8J=VSpDiMSg%iUlmoUS_xqd8seVSmGLfl z$g`Z4Up zhK{)p{#o7hskbRo{A`Y~vuUEc>MN;m5&wg~sOj84a^r6SZL9jP%rgQjz~#MdqPtCF zk_kAzP*j;$lCm~qEV^n=~c+)6YDU{_^q({<@jHtDlQ6OqFS!7CRE!wZ~>bGdT z$!5q#TJ|?{GQdcXg{VZB`yXSsM8`P)n@o6@sZ`KeDb4Pa@hmUl8FnxWOVwrwnkAB- zt(aZ#Ir9Pv6dZ(1dhB2pR(UTd<>*YHlGJY*SDOVn-qPs;zZGTh(>0$be_yfc&rE2T zt4fwI@Hct2Mk+gRiZk~F7s@q`l?PH(Mb%k@-%{q9(di6CCm)7I!iUV@=KeH5ankZ+Qkztb%ki zs0#T$lQDKoRaX4nzl=d~&~3cwY@02sWpSkGw*<`OS}DJm@1GzuZ`qS)#e_ydrukD_ zW{H7(D^ANpgv+bDwIY?5IklE(_7EZ8oqEtjaUm6XJ~3OgWp_lk-8|oE(Q}Hteb9)! 
z78hvuJ$!>LG({b1K$-SZ;dGx~uPW5m;-twOzgbCAH4u>-E~GE-%ruDnp8nu+(DEa# z?=!i!`eHx53Qo%*HD<_$H7$-ZyXk$zW}Vq|?a!N zGh0ewKvRhTvpDCLN1HwKVed3TKc<%WK~5|`XD*STy&qL!=A}pu6ld0|B(d5O>kcfB z-LGYpwT|_X!u+|fX?V0>#{C~QV#veJbY0hfEV=(La?@+}o|(A~1tD2rvS8Coq+3Uh5wZxx{1Gnd)ol9Lf+hD`cXtrqlW{s1k^7vToLm zTs2)S^yrDvTz)4QU_TG%IuLnERj~C}MR?*n-%u=t+m`@nGIb2dazL z(tgO+@;5a@zyrV%nNGMN7n+bWWZ8B3LEfpNY{(sLk{HBxiGv)Lgk0f5z>$;L_qxPH zYR;*F0fOeGhZ#C^#9SE!R>}f8v{p}1YWj(%49cHnDE^A{MJM^iqM4rXSCkzIodI0p z(zYO_F>mHe^arTcs%7gcS7`KAPxMc<8g*0V6)N6^Odwg!y*{s2tEFbuNouQ3ieX#Z zBuOcaO5#0aXhq=-!0L-zRg|Y5joKj67|F+d@Mp4f(}Vp41*;m&F%;LfX_=J>Obxv& zHZ|P+$O6b9W^nybO9sN6p!l|XZi9(+hmDwrjYPu^G0`={fnCcMF4_lS20#~5%4Qz% zr`#md%LSb#Fc?Z29C5BqOy?`{Bo*<)tsIY7N&FLyCYzL?_@AlG!osv}0kb7;2iV&O z(rYB1>83v(i$i4kMdlt~v@)~ZC6yD2^Y2{jp)k4rUU)4)DF)`SPbsF6j{h%#FL-J zmD8sgSjxM}Y@0asNSPSId!c~kqpU>NcXBwSE%1GTG@K=$FH;F)s zLN*sM8UQUlAj@C;?Hgc3+&GuG8%1Oqj+V{DjmD;ro*oh5Elzrb4pRlI`kGvz&tODl zFnxn^QUVKtx|6$z@u}c>9qFkM$8|c-AJbo+n7SUkXd{k+!Q{AQ*=GU+$dT6r)NS*t*+rM z9iS;DPR=m999|DPN10Pm_HsQ2uml1&G4qcnOZn#ZYuV;CxDqW(DQOdnubwe+`a=Qv_%}3dx<4;;CbJ-t7$5dQVXV@j_JOPiMat?yXw(twjRYP|dIP&Z2u@8_sJ{%EAV6D3XrppmZ+-Q{GEw$ZRqRX?Ipcn3Yt3Puy2sQ=v?i zTZt}L4Gdz3x58#fI)vhWYRcQ?t3(f*2w(=q){Wbq3{GF6ZjMX1U(BQ4@@j_`-*E~%mAjZ#5XoW6y_w{#`Yj9q5^&NNheg3>+#Xi8b#b3qG>EIZAM7INVjNwTwJAa#+heHY*R1lBDi)8kevy z1t_X?qPaKX`d0wa*xyYYl-d8KO>kOOPONAiXs1T-YgHZ%xLeC z4+4}udlo$vP_j-IJ?X;&r4d+ma^OCI9)67Ap6EaHOu^4E;-6u}&l^VkEiX}-hs1HR zetf6bBbKFP%Y%;DvfLT$`8L?i*sA?8f|Ro|5yX;-CO8npD(gb^8KJC)*e&tqVbhV9 zseH1rOqs>-vK;F7Q5AX8=>rF4K$zDN17a${!9 zNvqEbh1Z=FUUrfh>XcDFGkklsn>&`=+mR0KNd)$2S5Gx@aYuaZCO!zQMDs#@vX*=) zU6zS<`c{tBunw=~$Z35woL%J58J@6bLON)EC9I26hCU|3qGg&9e^#7Ce8~_AD+)Q!F?Vy3R;i3ZXJ--0zXZ zy+FoRyVo|p7NEz@XohKD&Lj7evK3bWcp~ic@D{90Y*M^helb=7izb$j95$A-uB@KX zo5C~_xaoTgvrxOo-B~4X&RVL(5}sa;W2=8mRdBHcmP3~0xzu?%R6Wiu8j0ZLCAuIBZncG*Itg87y#ot3b7wYkVdcUiNL%m6#WiNpTGaLa@HUpj|2B5FQ~UqBJFkE4|3AeqqKZP_t?Yd0;*mw$9?SWnqPQ}* 
z#pU>T%FJS6IGfV9az$vNzz3Nyy!fU3@nouk#=>VAd!P|)k*E66(=`X6kCxOWGl~){7I-&9q(`xDr<>L1%RnXd{?Tsl1PafeGd~4 zvqcv>*!Jyxu+?ZZD#3=HL^-C!!|qb{ol@ShtyHxQt`OCY?1LtUjUBP2h=z0Qj!uUc z$itKH3W=ocRr~8`;^+Cr^}h*`O&q#D(@*@C>Hn?9UP}MJ-ukKkpW^o|H)n!5gRC1K zX6nPmG->W7;=Y9N4AK3=zWCb5o7ry7g^MX!L&%qeqcz`SM{8-Ui^H(i5mKaEil;M7 z=Yb2?!(k1zf|tSU`Up>5#&)W17iqoyF@D4UuV!=Y4z2w+_1)UmPNUx3+j+CI`=;@B zD|YZ&-);%2)I!Luf`3Imi~FGXx9z>%M#JPEf2!yo;g`_=!IsuKCx9jTf4kXCo&RmT z{%QX|$xj@L)$#ungaRNm3l&FOPr?JiRCo#qn;|cJMSWuiAgxp<9&s3Q%&I6A+yh;W zo>L8SNn&Sep;mO`F}^;C2g?MX#MXp`D7_Ba7bngl$~!2c_g3;ud$sVeh`~+Ie1Jib zIKYWVY!UTV%iK4Lo`y(5^ddVyS=gz)c6~bNoH8lfFqKgNK^$P%8!EVm_xUd+AidNS#Zk}b*^Lds+BiKyfHorm-a6EDpucfi z`2v=IVB!pwZYGX@0Brv^5pNR&pTy3p29c~AA%V#zH$XFjEck`lu8`n64!ZOuV6kR3 zbE^?PS2f1f2=ZaGD)cl4CU(t*oQ{(blaVYK5yT_p#MAGkst18|Ikk04m*DT{(YF!L&PQoLl$pd2x23y~ z88h&Yq8bxyp;USOe26UsitiH&@9q2keUnc)7Mb!MB~y>^nIOj2)j@%8+5@2v#tFv< zrrgLU`o@B#+N&MpJC^8fhdvJPpOBV@NlPWW|M|_5Mw`(?ZUF224%OI=>RCB4G z{9!tEws29ff;Szy4-~ARiD2DRdJ0aR`dz~QNO{##;%Hw{g|st12Wiuxt*~CUAAcF5 zw{$o>Z6lAh20dLz3*8toSdYhkz*}0g2sCSBXhFw7UX4;q#EQh?Iv%GPd2@%nJu}G{ z?ZTG!^F*T7AX(WwCqnXiuT*aP{ zgvN80*g7Od#(>fExd)iiIr{P<@bI6?`~N;4e{aA4pFcTzQoR3<4i3NE{}1u`B;T9N z1--gguKthhMg1iYZ69+ve)G-q=OqA|9=ocC1yXd#xsN-4>-~O5h1V4fO1JNvC7Gw9 zx-YWwruxCQ-8T6;oHdrZtsYzF9rZy3kZzhR3>NSQoDfmM6AKgW!CcG$X|lMqs|5k8 zEmz+0HwOo{NJ&JK#bp$;8UFwoybtzOv%i?1q3j#;FLk1yOLSFs>X0=)v}Yo|p|Q{W zHa;@9bBW+TWxcP3OmgeY`Yf`a)SviseMb(Uh{Lj(W+nhM5Al?d}1;^x%FV9TZSR}#|!7&(E zCeBv8;GxgMc^++eM4g4tVzE3XlflVLXVq@LC8LZqT$e+0e`%r~Gbw@2-v6$xTRvBryQt-{oz4`m+{^U@H)T|*)BLmgr>^}PJ>L*;5 z_jW;TM6NDbUBIt%Q*Xd-vBi#<)qDMkImB+g%|F&{EW5w+_jx&G*x`1!vfOcW;r_?U z)TKE>4k;V#C>wU-PR?@q62A|gTgdX{4MkZ??Av}W6?eWb=4y069BqQ|_~77e1|wIP(-!x6`=UFUbo-~{ z%JPjHO8jgp-MGDQZXVU#e!8yZs z0O*~4Iqa7P9G>;ecZwsPhQWF)MDmgcOs-|Zmf5d=PI=7yek`Kg$RORn8J?WLA8+4` zpqOQwOa|u{L;L5eVZWwLa9zJJ_3!>vy`=B{+9l47jX;?tz!q5PyD17q_Y_O3$+RR^ZM+H-XIU*{k{1NApiKZ-;Xp zez5zIf8a)Crah(JH3MPVQJl8*(;eDXmf?b>v>Z8}fzQj10Z4t3fVBO?fCaO(!+l 
zM+{ehkiU`$iPON`uu7Rbc3y^ZfreVCbMolqiM=>%8V6uM`jI3tU!K7m6O?emFDB{C zeDTvy`?b9Ccn)HKo3dxPG}3}vP=#G{$!M??`F;MfvaX^Dp&ZALR4P@&C_g&$|5wVji#+3vwJK5c3oDW!`*R z;7wkT%7n#PvQYhnmBX;>#^yQBw{pbGa@|a5vP^@7_YjIJjpoex6^#&S&N`ODXMs#a z%zUnA+t}YUFqiT zrBUA;9=xF5bsD`$XEPSRZld`aQOcFTa~6M5$uxV@@Jmrjf7kq0j)XF?_)OL~>vtLMUIvfQ+KQfI;+GwQG3QJ%O{ zD#47(4NE-itxit-4s#QqpG)D_B`QBp0F4$2kxPz zJAWx}$I&eia%c*$WoKU5w6v%yz&#Pc`@%=g3%lPwDQvgXjPzcA{G1Gc+h=83Zkkt5 z@{ig&w_oKfJg32?!Ovo`;y#N%Y6hjwmD7ldt=zek1^n!0WHX7k*SBieOoT6gUm^J4 z4#v?0%ClH4-_e+L5-JUoerpmW;!o$fUn(ZB1%f+r&JAhK63ao(gx^bLB9;|d%8rU5 zbd?=9#bF&)uGn;cnTFt+oYR6f{8-8QiU3O5TxQl1@w;jkj9+_|$gJuDaATgZH*TZ6$hi?l*-}=j)|wqU7r@x~ zhUNEC6Vaw}GwpV}_RF#a*zz?vW>HQzKUdJZ8*Vt;oH+l%R_*4`-vtY1IL8iWqC>cp zskRPR7ojms^kRHs-w$-jWfQglhI91agRgGb)cgk4eUsDR_?v@o(1R)v^LggNtqn3f zoSAfjGZ8Ooa!e8om4%sy;1O%zNN`j&P2Gq4riMq(wf&X*Rne<`)9Tf(RxBJq1e}G* zu)hUmrb)TmBy)n@))kxO{WEb+(C!Sjc_%n-&1mqi&>7LxYJIGiLl&kS=J#y&f#V?i239*KZCFzIHb14!DyahcgFb zj=Ss+KM-&_o{`4igKc`>gedK=4)ztuebaGMRi+9|BLg_2Qr?Kz@4qPF#QcOueKR+y zEaLQQ<|z)cq4>pK;Tdkj>TR9ai}6YGm&kU^OdXOgeuqdsjaWEwzTK4rWw@|^ZDB+P z&DEYne=m>!UhR8%A2*ipavj+3z{V_uUHMp^KK4Bt1XJo=e;T0>C-SY=-8Rjv6)Duv z*|pjec+54Ln+8ZFo8*v*BUy9^k*FE&uV}mI9P zpumdp%Umg(UD)jwR{OEry_K65i~szGVWhC|&%{GbLRJf6!JzaLD1L^sin+KLpB&?l z=5qr=8-8b zuoXhqumA}{3(ij_8~U4{-7Zre`u)*{4)|lS`PsW;@?tpZU!HX*lXs`%{^i+8w>Nlw zdeR?^ZOc@$c*1DxEsk?uFOqE>F6+|4c6slR84r@GyRg_7+vWSJpGzh3Dtnujg?JCY znpysn10MjzxKZ@Cm#WC|Q^x}2{R@u#k3?(`4xXDhRyYI5w zxoVqlS)1G4T~AghcWW1g8c2o5Vp%3H^UJa8CYW6ThX*A$x4-5~x%!UTm`EeLamy8K zij`J(xizOer1841;ee~K(qSR%atA^hG7A@G+D+0ZV59-9_Gfg(JrQ=IaNaQA6|O1! 
zy-d~tlO0dWEQz-3UWl#GDdL#?*w$t?J)zDkmH?SdpFLrrS7CSiYjamC`+u=Cp!XX9 zaEt%{!L#S3{67azztsPGkk2p6>Z@okE6~S2Xo>HoJ(~@n&g4VVa32gXtw1hWoN7ir zk3}FE{_ zqi9V@00EZ?*>l7k===AwK^p3Ow3lMV;!2BHv#OgKYnQG+{Z#u+R_)NEM3FoSo{yhB zB|cpPE><+)K2O#<-6^%xp0lvmElr^9(Zb+O09P2z4%Hj|p^-ycqeUj&(H|_Dz`}Lg ztXQ15^P2E^$o!rfq&UCQ|a$8Hp6KL@aT$!TN2RS4JX%Z{VPC; zseJ3862pqho{eikWVS5_j~4L{`uTz9*ym@Y#X$gpG#rft}E^pDmJRyM3;4M z-cfhFJE)J;C2vt9x6b2%)XH5n%3J1{(=d&4xtm^@_pC?)S)opqOE){?lb?RlS(rcK z65Oa4mQ}(EO{t_L6)$Y+Lxm&}>LSdbsCBNtkvc0D;-e7#(Q{KdJoFteksq{?ONh9p z?+JNaYj0`R!N-~o7fL5Js~)ozXE)#4Ak^;~tHVZ%G**XGme9k-pK656C=b$@nf!di zc_?C5BSeibn9Z1%$YU}RA*&g0r=H_BP{_)F6GZY*gzJ2LI}(V^S_m0c@usxtA~8kb zlP8#0(4#NX&<_xx-v6&WZAi$c)X+Yju~yQ_ca7e7&^;e0L|mNp)nBF*7>&42gGIh; z{MP-tfWtr82St-6lzi84DmyFKrv9U3`0wmz#E)$Jshk$2>T0mqo9HaK1E*xMeQpIzI0ct;Mj>aR1DKpinoH3EzG@KbsHYe zOE>Ds?Oq{J==rvN7Or=`B=3CtW~tbH%ZJDhKh;cv{PT5WvM!lSWtmH4ehIg(ySzp( z7E-p2$liuDj}N<6xTGqQrN=mCWwNvyAv&LsxjSc0=IC}AK7J}j*=a0uv*)v1+4N=S zy))OVOB4!;RVKa5IktZCE(T&g=iwZ;uWf4d{3`YlnxtX21{BDC=I#9rIl<`=9!6;b zKkvNvSoifVt)Jc}F(45bYWrmn!G;w7yPt7Q0^~asYMt$>#xLUbn}dUc`$sJef;Nr# zF9Wl1cIbQ_Glg28MqD~N&7Bc^7wlH>xNIWbh@a&v%pb4c{`fga#fYtKEqnfI zs0BDc&YJPb$9jVdU%h1_-W*(A6_Mq$58X6bIEjOs_q?Iz)cfqiuSlZJxVd)jzOQ4y zvA)jzooxs-D$>SROUlDu@&UZNNmeL25T3drPPhcMFqZ^D6Hd=Gnyft44iT>q0LB>& z3thy&iekQ`@w!F6l2F%!#%lw+Fxvf+nK~$XZ1U4jV68`1Vy9sht8&RIjhl}qM2p<} zp~uSk6zHXxq@)Pd5YJ^8Wke`SZsG``@$YU-JJy%m-3H zdohiesRRq}tofr6GnwR1g^U88z!__hM&4297ID9w9m?Xke^8RDIstbby`cwdl+IB; zQw|Cu@W@FM_Ev;E5it+vXR(M_oM85_@=~%>(LpIW_aF{SiKont)F9aODN0Cyd%RVx}Clfz~Hm&7E@J3KJD z3R}@2g*UQtKix~eTOGR+LXp7o((4{Ly)7sdKKRP4T109X8uW_S0qHcGiZa#Bu~TbT zZBfT8Sw@}+Q7-bimyibL=lu%ETu)N%X}eJQLLGt(vlCcU!-40NA8c=Yhsw~BgDbZj zET>8=po@_XY2~Xz4CzKoDJj#ielKf7|4HF}>6gv6&l3+oRa>>nu#(!=wa@B#AQH zL=wo$VhR94JbF3f0sF`t90tmi(U`@TP=@@Y;6YCdE^B%z{frNppW7rBYKTzyS=OeQ z0Sm8cZAyMagQZQ7fW7AI!!fzMRF}l1Gp>Sti4gZX|F!8r!-Bo|o9an@U&-y4mpQQD z#k&MMkh5_85Y~|^@7VY@AXIcLxYYL(`EJ9E)b{_WJpbGjNok6@7V#yNp5HjneH!uo 
z)#1JxNbZ02!{JX#di}88*WLFGr5Qr5B+Uw|m^*yV*jc_0r&Kab^yAh~CN@62yuI0$ z6y7SJR=&DJ0!Vc6yMx~yG;$4!)pPN)*|bM0&3h#sW25|UOuP5s1aO=G=ixyy{`cAA zXJ6$12l@Q)!~SoHsp(7-kvS7;0)T4DP)TTY0QFL>qbK?8{zu8mk&wxZe{fv`b}JNQ zAqaW7Gmu!M34^LXx7R5vPu`pYr)&;@`N*R0x60!iLbAHMkd5lnv%K$ugUi7J#S198Kpv943I1c8}Rk)5})3 zd8WAm?gP5JNNe%F(^emEE`z02(aWVgCWlWC4opdDbw}&sEWd5k9P6t#QyOnrCDWn- zvt%cXwpQRm83C(wDOsSV7Ux=3;o{%2%)+6~pZLw>=;m75`bhAz7%RVg?){UO|G-vt zzrmlk`+q(@DAoUZ{N%}({0|TEu@83mT4zNhTR$rIRF|tNW$mk9p~@<|XG5tN$MGKu zc9pz6*J%xp!P%IZ7m0#&8<=`q^mR^vge7=qe+Q+v)2W?(?RkQLqQ<`{-iow1Y(~1fI6>)~8_tDJ3NHaOMU}S}d;0QZY2lW2y*|RB?+}nA> zaw=e4Pm%CcB{)o9?uD8bty3=njZ=TULPnnA5-MCI_Z{E3zFv zcAw~m=sWNP+Pt{G9_FO|$5)))?9QS-7v{0D(Jgd%%l>Q-{eJCQ>Ah>CTNDphHNDF) z(9q2naSZUJLqxfNf$PHqa0|SUI)nS&hEv^g=-ucEWFH9K(IJ(5HweTH*gz_0hNs}} zQ_EwbVs3ez%~pPvQLso`vuG0qC~;pU<8b z^M5>kesJ{V{pTS*_&A`p-F1Y)FC5R$uksE9MaNdfRtESR7L3JXCjt(J!1ZH|}j*H1%T;WTeDGIJrFec33ojHN_Bktbvys@qO>*mpO;~TF7 zTNz6O%*tV*3*)|1symO(uN>#7T^&%sZ=vL)pdB3&fj{N_p)wyFHxluN@%aM0{137M z&3Z;jAYJcya0kwIC*RK0;TrgJcFy7@4`DYkzR#d-YaqVabOYbv0|KH00pFS?}|HlVk>c2e5r|_h!2%z|)J5h&$C+nUF6ZRqL ziBKjn8Gx<8MWfF4z`8|Gru2;C^vGgO?t0S-;HW*@@9B)Qfrg9Z$_v6 zTANG;=NCi$$E#t#rZ0i6UzqxLf2v;6cYktLeo%|kuq&1NqBauqbG}VwGFD%YM5v}+ zlk9|t=?6_JSwm>l4cE1qM0KS6i%g>^0Ox)hWR}B1qZMopEZPR6)!M`tH{lJ9{qEVY z)+THjCF?$qYi$xK>XZo!6WrEbK>4x;al1-cWPZ&&Lex^pV)HCfS(*2TAIdVsPd^=( zsqojNCKdQUzQ;Uh{{N%LPfPOulcU2g{Qp5dx%gNX&P437j^yDyU|t|n-;0?q8;)n# zhG5#^lFr##8U&cXH{WGOD)NEQ8)<=?j37mw;b+WgPj6%!=2xB3p5Dky>Um7c%WJm2 z)MfGwlO_#wlIV@>LbEq)J@g4w^&*fS8(aQQ4JVmWWfz-5*eQ61<6{SB8k^^U__JVl zoky`-)m(}aMAmyqmtzs=31bjkE_YO^A*vi3N7)z)>7=4DP<`QDvsh<**!Cktmi+R{ zDKIat%z=$l$Gh))DMCw-o#HPr&~2F6yB#-nH>zq;R3GiR5Q=~3)9oJaUl5jYl5JU0 zv&qar7a5&jv$muii_{{@5K=ed>WJ)$BR*Zq;1`Y}nB!Iq@#*qX23vR!pDrh2mYmQv zi#3LJ;XM`Is`;RvK(3Ip-ftNjD>UC_Y};dpu}wAS+s4QJyXzcuuWj=%FY1?beB*H} zeT@yU$GQusSX+V1bSk}=N0@fQ-wFD}=`-x_HuohIiHa{-=tqL<5T#w0@pc{$bib{m zQU7l6yZv3k+I8%RqHf>J{T2j){C&y7)vl0xs>2{$ZGgKIb6Ux0!G7`VNzZZ{gVIg% 
zyIww}x|!YVUCwB+PxU663EXlu>1KDjl8Sw{PavXF-iUY;pVZCHbgR2e!1mdV+-O+L z3<Mv=_#BDERe}9yTQaQ|^ttPW0!P@IG)yX6>{1=wA zNMN6~<9}11BL08+DSs$7r;h=Mkk14l7Ba0q2vTA6PeQ8ZQFq6JqR1R31oyohBFa=~ zoBUivs3!0+=!#;o;y#PpX&hA1qFtD2criY?of<29RWqcR`8;M`a_a<4!@Etu?NvPi z6x-S*kueVkTMhen7}|E(I&P)7SOsVYd2vg(>N!YHL`-fS9Gl!>Slyv|z$g~Ws32@% z4Ev?FsBfX9WwdD><~jvSegS$q-2^h%>lVmoIz>UX#8nMfznfPB2I0J7zMFt%?Y9BE z%QT%1l^|sj^KjnRj3V!2c1(V3yGix7xue;a>n!_da>72G>Epvwi^Y;GXP9kPqu%KY!Jl)}v2 zfA8&@4XF8X2-t>!EYry^W3h$&NuOiI=5Xh4R!eJWKy7RSQN9-oJ@J=~{hd3_cATU0 zs}3e0Y)N^r5rA_?SpbDQYE#!O_YjV`3A^`w6_o&g)Ln#V+X(yquB1H3pY=|<>m3sK zG}@vVz|VheN&zhD?w8AUR#O7{IWLwSSMUILO2xA8=Q{c9OT61na@Tc!xkPrnL;$h( ze1GI%-rv;$VDIhL$e{hzT^Lgy`e*MZKhw40vk!N?FmS#bxe)$*yA8o@{1N(H#mj{* zQSyeZiylP}hD{cF@fzN~+IlwaYqo0*Q8RCI3&?0;NiLzna~BBVF6 zdECSZ1akW2J9^R)$e0`oYZWS-9_n+3Sd8h3xHL510T=@G(?tGd2&J_9G3l-cB z`!i6){pZ8m@%-oc|H@P^xz`YYTk^ji9UT<;|C48j&%W^ghxpt}{{OE$6KGXRsc+J4 zo8shWZcYD$%w`_5veJ_Re{kC3JD)sY~ichtDj}{EbpO?La9Q6 z1&^y0F)WsJ$~)eyE;jXxshhC5BEnaqroY`fqoXHB zM^8%Qn{Nw{+HSqLZ4jSULVTK6LGzViO40{%2>Xfw68{du*H_nv2j*X-X@q@~c?6)` zlSF9sh%NB8)G2L}hDkNPEr9sfBQBOWPL4zi1z7Wxn(-tc7E`scOk%}DQl(Q>yhJ|$ z@2h@jFtGz2(m;t9D$izW+V{*$-e@YAaG!b9R-S8?r@sB*8LSoOx_z`8HeCpDJ;6ds zm8_azA7P-ikyYQf2f`aF>4xz7||9|rM=<&0H z{r~Xr@fZ95gM5BN-hh+b*OI(BJRq-Guq57s#!07E`_BG!eR%MvJrJCZ_xF9_$qu^K zsf(sbxFnRQ3ylPP#=Nx`FoHqdOU0=$n-)AF&>$ddk;YkGE-$-QtSH!?QptScRvhTm zYQOmnIY0S*fBgGe?ds~v6QL9Vt9?ZjgCsnZi2{z(+P^+LAnmr|a(~GF1V;M@ozsI0 z1qGlE!z31d3I!l)wetm+hygMcB9Tc$Ys1 zUr4^|zMEtdq&MbK7pB)F-(8DHX1~vQvPh@!Ted0ts!g+v57PcUFMzb$k|m@%o6{c} zU;WU1H@WQgdV|U2^3CA=<*@%#gZzsYB0lO<8nBSI$qYIf^u~j8XY8LEi0ywOzy0kw zkAC~xV*Tw;srhLii#x&3SJ#IJSMHHMu0pr&fVhp) zc+Tu8MM*!6sX3;M%tR1~8<>EicbJ_-dNGaF*Aa~=yst|lX7KZy!-E=>=O zDO;h$TdSR!ZsbSOXEv(^{0o{Y4TAhg?0;)lEDdOfdLE0Cs~^d`1xpqzCW+7olHgBd zaB!H9Hw-0o>C1MI(!3Tv` zofa_W4*Nit5#I1uO*jr}xmpfe@i<9o019peQ0*F_;9iVRAm_L}P<^{@Gn%?WubK*s zs{=dW9D|4g+4X&{{y_sW)C(6-=fky}C~!agiNk8Xjjd#gqafh z6{IYtE5FOsQk;S*P4s;cK?)=1>!{FKkp|#O>BcY_^^MtFlT=878!k;=T(wvY!Mmzm 
z%>yx|L8n5NhRu+SoP9e{q_10`jGi5(ivE)S1?H)*ID$=x`dY11r{A^vo2A#yyn@sK zM+dKX^#C4CXdvcxpoxY|HEaN_8(9}51#;75D@XZWOy%|=REv*?ub*+Y51|`<{3w|t z<+fo=tB)T?CL*SD)-#;O5UEcl&M)9Obi;!{F|SlH1~vNGw>|05yoOGBJf~5_e5fQX zc_QL<26+vn4gz2HGe8rwS-?X^_AG#ANkaPi>uemJ&pNAyIKzNOa~y9L+*=scQWDH= zspUf$GK%?12?*ItI&gHZbQaDl#mqypF+C=ifQ*ivTD9O0yzJeCV6s+n&DLbtCwn%8 zMomTip4DJ&I%wT2+gHFXF`}|xGxQpd%w?$n47+c&pe1FxWIma$;fN#q0CiW%pQ@>d z7>ePYib33Dm};1j2-`-sR`<+ zrf^IEMo!)^PCh3e97E?DwLl)CRVIhfTW0Mf-pkG1G5j!_R87xWV<@5CH6H8G+qltL z0=^8lT`%D)M*0v8t9V8r<~+>C*$67jxVHtEy>o|sMfIrCkgod2H4EitGB8>9GT87P z8H6hyix6;8Nk)Z4a;pDo0$=5Ehs)g9-{gUF`=>Ua-0l5M8aKCpWYfWI>0e}qM(hBC zL>rFrrlx;Tvd?8AF7by;`4Y1(G}jZ zSMEL7HkzEic4f0_420EF+6S0T&sfZbZ)c6(EqX-a*=Fh{;R0Mc2)+p;_``1w4$9!q zQn}a)I7;PW6UfKU@*tyFEQRKN3KB@K>N~>&!EmvJcn^gmk1iw1=ec9QA?V57ECL48 zY@ZDB)uy=W#pXbo95iI6{_S95<}YaCOzMoT=N8nM`Q){t`8!TdH^7rK$!kHIrl^l< z>FP7C{I9SCl%qDg+F3|UJ5wvl>Kfg%VWwobma+oif&w{7Xy_{q)YTWwh2+B9Fwfba z{m}9gUD(zhZKK~8=e7-2VpPF2QdG=*yHb25y@JQ~r;p)Dp2b&0>8S=%+s9RH8bJw& zlan5clkRwAq&$25zMGJ4?=6{3PI62Qgn1s8R+AGc3aKZnwQKktm?R4mZRpJ*Dl>#Q zJGddMB}_lN$Y|(iw^BrTOeBkF3_{U#t=PB-LLuIep4aEi9Rak*C7gbpNw0WO zz>Q`CWPG$&IypFV!{v){;#vZwgo_!`=_PI!LY+RgoU&LF8Z$DFX{aDW1yT~Q6$^}H zp9qD(uVo&ERU<-S()@r>+eVG|>7$f#g%(Hvekb|`?_&s|n1Zta|1@I2uX`d4nU~zl zNKWcXW5zNNPKW)T(J4zLRh4?u07!(LfjQshOS>waBI?Q1V-9`8EOfTToRXt9mWMOZ zAp`mKG63O!Ape z;4aR8Rho!No{7Q}*V6KwuM{1vvt2N|WN(B`EAtA9nFgw_q)z~xOTA(+=RtlG2VoSA zduf)P`AVkKjlj^I2~8K^*aTc>L8S{5SWOOU{>3HtlczJTR4*b+%97)L#7ku&mT(-X6nd* zu!G|uy>4D%k)Gka0-Nj)&R!Q_3X-I8NW^RgvdoIbD9R|tBxZ_eLR2__2C{A4ypyS9`4=M` zX#zq3M^cKBG|jtLubUI1Dz~C&-MYGkg04s^QyK(H>PU_$AeTF zzH&6!+w+r&en(atnway`BC5&p{=PAr!wbE>Y8>_jcQKZvR16z*MLj2~AKRn^1GGB= z%ms&#&KMn1J<%x-{r<=hMIS}~l*Y1i@`{C#ES?i?IGQ(I6rjlP= znK^yZSb0so_%bf5berU}Pfa0n!f5P)CdM+vtAABsAFP1DCh{jrK|q=3mQiLrNR;~g zBRMr^6u(lCX%q!(^RlCwn}>hjnuBivpE^d@Zj!`&nkLL8lx0L?D=e`viM3b`8B7D&*aeu@Cht!K#{>{AbuIug|iLun`%#plkX zGc=b3X$1YX)DAu24(G}pQSHH7ns^IsH#yN(6Wo%B@>t@SG-meJTB;6uy8#I}iO!uy 
zgMV0ha_PvN)jTCFQ<>)q%>v(C%qH9c?YU_=AS>o&uzlOavc>H_uY4}f z9nEICdi735Eb9R$^ZtM!5~B+Io%xCAaJ$T%xKxZ&?k7}U%YEZ<%G#0e+ty2Z-+*ds zQ2R}5WiW!SZH#_Q4O?fsL5|7c!NEa`G<4bQ27d9=mUVP&cl{a%?0&;|aT*dTea9TSLp8on-viiL2J__vp#nLHn^0B*w=PH-^sT|4nOT6C7DQqbf*p-kb0gyGKu{k& z9CNNm|s`G^YDdib)>&2g&oa^dVH7oRu z+j&ZwhufuNcV}WCFq+9pJwNov+%k)DC@h$_l!g?h>ev$D4zNrUZHd8tMg>3*WVCoP z6f3_u_(pBHwy0Xorq89q)nsyl`ppJ!OhJ5XSk`ucBWXKrkpRqS=&=bl_A|Yv%0aLl zfrh9Ty3idfu<^rVA&a?(73wD;jpRZk9pjE5JKt#p$bb1U;7gv!KXtzIqV!*WT(YHz z*MG|SUzoO_onmts{uK^Fa1>&O0cBjo%q|+XHLGdg!M$*g({N@qE#$Iiwz1hbu<$9x zH1ptXk(l|ZXC(u}nZK4~mWCe66g*jX9K4igQ7q?Qm=>y-if?R~lXV6^%N!RqLQIPFz-B}S>HHIhpT~4YL+VnnGo(3V zWyUNALe_A*IfO2mjgk9;r-?_8@KAh<+_1~Qx_xEl0&A$3$Siu%geFP9#(PUF! zEvw|a?d;y4$6EoN3W^aR2L7C#vv|ovnCF=YxL1%Orffl1T;MCA=|`luU`ox7>1@V5 zjB7y6GdE6>!7hgZs3|)QjG@j_0rTgeA4S|qB3gz}mwvE9Nk0!V*1u#1DI<3HTzZQ8 zuZ=d>JsVotWbX?6OTgpAI3>cKtFLQUaH3e;sT5eW>b&HMHXVCu9IJ~@w>CcO(nucq zy0YHHTBhGLPESOf-F*-dt#=+`jIz+32Qf0#mT^4EZ$(sZGQ10R%(L~aaMA6{zH)Dd ztUVb!KDarR171LKb6lV_p*sG@20|b_8eG+`Ok$?alzP``bdI?*3Q+M&RFNRc*J^6m zt1#6kPhG$o_jiR{y=CY$XklYG>RmzWAl7Y@R$9(o!A3D_gLVXtP(mj7ulz7ax%oqi znlX#SUB|K=w}O`mP2^ocL#M4^*-V}ZnT$VfWAnYMc7fhVoi+$V9mXPHeSP(Vlb`+@ zUz_wxlQzm`&RQd5RwN#K)_X}_Fkl*9;sV-KcX^W1_lqo zh7SF;$hll!>R@~Yx`_z`)Sor&2yu*MZ=5KrV^Wn~0h<*9r%cmwQUpjjppR(?sf^S& zQz{oVLRh$x=~O1<9}Dv%|CD`kDN-z+LjK_&@_8iGKQN2LtfINieW3){&grM1&2YR8QFOZItkK;2gV z=Sivl&(YDr<1hK29^!LuO%e%pWA{vsqZWzMI1-YzrzsEcw#q^eTSlvZ$3PZGF_*b( zEMatsVI5Si+lgqWlC_BRDA=!BaL*V`I!hCUH|)Na(OY1(*HSy`uGn!YbSRaWx#5t7 zyisos5B~jM|JU5tZ4dl0JoQ~HsJ8%cK4Xz$i)?OY55h;xF!q=R&}t+m|9k@IQfyJS z+>w1Q@u^hx#yKj+2>O>asM0tAkFc0AUo6%9c{q!yOybl_6f(SpP|CWRfRYg;_28`n zQmq*5LR|h*Eeh=BXtQYhG59xwNp-rAv($c%cdQ>!T%c!_Cr6tM#Q76Io9<7%M`8I|A3yza z|3Ap*O?PrW7?HuKPc9~dF*zCb2BXP$qi$nP6yT^!_e!r2(8!pETA_^;q8ey650x}*Lb4~J>w0q7Ae_l=)suz zmdS)p11=Xvi)Qgs)@y0#vsgdHnJ&33RTD8wU`Pd|pEH~6QAvC@gJ7B|3&c&cRj=VS zXu?C(9Smky>;Z#fZYUAryM@$p0(!)nU|5K>1850@An23^>Kcw%oUBRA=Zi#wcmrWJ zES6-Seei-*0>pi1Ovqh#Lt_PS4G?{{43+SMPhHepo^y4zh7jIMZ?bnV>cn5m4us>_ 
z#KE#&GcnpL7OcUg$~Y*h)hPX+hbFG6VfVe$P{g4rW1;cCxia0!GsLY$?NWf zyzchN`Rf5WJv%!cpI?lI=kLk+DH#vG9S+{XFP6|+;5|2r}HaUHH{;oS7bja`}d4GCAx?{EQbJgy2L|zPDcTZlb zsdP#2^zB)9^u7hn#)Gr*V4@~<{*i}Tl~*EKy00Cje@aHD z=eRW0uzPYs&IjYS6SL}i&B3E?ZI+1Zvx{rt$_6lySx<{}M}2ZO7{Bd~2BY(n_w`zT za56a0x9X_>6&!e5vVP7flK4YFiZ=>RbXswK=@gxsS6 zVaq5GYsTyuN@D6I0unUVYxew_cvYF=TOb%LZepG!O2D;j2BdV`39O`6h)pG{*YqGW zu1zEA)F|B|<|ekmICs*71dPf=(Rvh%1djw#zL1PamegxA5hn|LJ>xRtf*LtTIaG|9 zR0lscxS*MiAYvlm9ReD(NXBtM9#&^^8KuBcY{FUdhJ&)T8DzRP85a|w0U8cu;x_6v z%K(!_EYkVHkDR4%w@e6rLni5l81A>?#HCjb8t+CnM7_E z4=xO0J4&Yk_gtABS)jl;^mNRH;a2;sEXpw1Zqi66vO>w%G4dPddEvj3tW?}XOUSq^Orc2o(+V#K_3M|;VwDP5x zJgCF7Ji#j{p-h8>hKl=NhvKHa5I1tO z3q*t$aW>fw*|s6RV7PdbXP$;@b*W_7f-`|n$sXe?AVa{nM?>PXC8p0znA#7XNP8}o ziXQut>>W1iwHqq&mQ%Xp8kPz~$`gC7ifO>-Zp2aUm_{)dG4_!4nx+z^AgenGFMS=! ztADfe{5{z_g4EQIfyEPQUy$(3MCvs|_Msp*u?Xi%+oYZi<0`ED_hj#J69RLmN~&?z zg$=HP*OHj}Y)Rv5*&>^clzWb$y)kaL$P&1i5%z!qC}iPD-zFKtS(=AH6iWUI z*92_;@mkTr_i4sKF#g0!$eZH|ow8FNDC7Er>$DBaJf@6C7pHAxE9w0ydY$c(a*iEhzegR>Nt)TJ>69 zo3!ufdBHCi(1>c9iCD&20#(hF5^7PZQE4*==UK_&0cjNR3A?C`yB4>Gjs`{{H00sp z+GwhI(Xj=PKn-=+xUV79`sftNSv#it$hYJPfqi$qEdtCL(-_~XOQl(7!vYXgzGg} zNmnPr;IUt?sY{P*Nz9tPtF8_^OiN3-j+6+qRd-9}bdN{_F`H3z%9 zSY1?6Kub4@QXs%HNKrjJ(YV9H-0QU?Yb+tJ#@*35X!+y8b4Ui+>z&@L z4k45560l}C38F&4fq3j8vIa5GZ43j1rMrC2R8O-vVTSMAZKqb~jF?JRI#H6rTclCM z87bd1a6m-J>NUvgC0o`YX}N$pK1e9dmZ2E1+2xr91+`UM=2q%5QvXh38p;{NA`^S; z1Jrilp_4VicrQgPUMlU#S9d?3V@6Pooq8=p58e$E-TCl|x_tJ!wo9`G^N^s$uu$Uh zw(2X3m-U*VaFhp(p-tIXQK{;6aj+_Byn*US|kkvh_KY&%33h>l**S; zgjv87vUkoPZ?IplIo{RH4wBswnItq$GLv$L5HK}M4FGQzU@uOLaqiYP+`nEkur^)i zz(aF}S*_i0h_=JTpTm}zv_n9M2n}xN8UtmtfV83k_w_4z z;-Fx<&KA)j!x_qz&IO4!c0tl;4hoH$O<2JxjK5baw=+r?4g#{ED~5GGuyz3~seD$E zaP$epix|4gG-9!2N+K^<+h98#lT?-$4bn&9Q#5Ub@6h)LE`*k-{M&4ooMh*7O;5Y%MtxTizsK?S_T- z4A_-*&X$1J*_yD;lJzkbnHyTTp2kj-Q{Eb}0pMkp_fi z!{HICJdG&O;&BA3#ojI>QqxNO}t*Km^mRDzlw)ywppT$+$arg=aiX(=Pm;e zlf5-tW$hJP=+FW6CoZ~R**fYxv6CD-5>tne0m>gH&B_OeIn4x*O10B{YccVK>T-d9 
zWYSL%h?^48B;uHmJ>CRUS&J-%&u8nFlA&pofDJ5^0i4X(IfFX(1Pbn;3(}2DQ?XJ<0>ZEQpsfQ$n_dc5~fpI8V@AAh%@_r zaT+inj0h<%Q8SV(2-G@3EX%LXNQB0IhZ!-8nvsY}X<4hLlIfRX1qw760%Oh+8n4x+ zKw4oDn_xIAacl#rAX00NNot7r-6R)<{>F-0bqB@<##mW;X_{z5BktNRfi=K zq0POnINpW6^S1g1DD}~>b>#8udaeA_sXgbwo^EiPWwmsqh2nvqGSKC4Zb6n*8ZE)F zW^KYq^u1g>DWy3h8bBb*!99l4npN#)Z5C?0}=d3xlG_VdeOoK!g z5|U=6v~q{3S+A9U0aR%<&d!7|r+R|46=|?pi80X7H;2`Fh@h6~qq&3f5Y4mR!g`US zX^kedUULv^T~hJ=NqEf1&{9c4rs$69d`4y~zs?E?fu>^}G}S-%)v=O>jDn2$l_II5Sj)S$JZO}&Kq*&k-eUokHLHs0Kw0PO5-(N4t<)?ra7-Kna^Sx)N5T2E4iv!jU8sU*Lc|- zHxL$2!0T2Kn~3FB6od5(Y$a}~>1>2@x z+hG^VR-(ct5W5HU+R@ST&hw*#BZJodX`j68jv<+TpnI1Qh{mwT9WuE9KP3FuVOBx4 zk2$U(7+X<)*(D6wLfIe&uH}wN44;4tM7GT)*2?><$#lBp3Hfu%Vq?EnH-t`W0D;_C zHvs-F%dU`>0DX}@-1U@N$NFSf&en}(12KH=;gYzQcXIZST;MRQi1W8 zu(vARNr-(6l)0r|yoKFN%hwCPUv`r{|Xlq{( zSR=i*47s#EjP=c#4m*Q(UwF>TT+fl1{jw&huP_;V}Q&-2z|KOm8iu z7e~ZG{a8&GJ+WM-;G9{nWu>&um)Y6c$BcISIxg<|@Str-;&(?)bW+mbJxHQK<|zeR ziK13&#ST0!`XZ39itb($b_5s{EK091AKh}uP4 zhq_Vr$(1T{qE*Y#r{>p3LAL6#%LUH%=BC|hL@kut$Dn2gqKT;#0-)u z@DOukgO4Gi#;w$*?G8o81W&e^DwVOjJ=r9CAT2i(${Nj%PC<-sqw2bG?8N7lxFl1y z7T7}F7vxyIUge7AM2w9n*n{@OJXKF2Q!<%s^%RnNQAeMPZrZl{c(-J0?@VzHbo7%Fx_oPW? 
zlw)>82uq}7NfqOx0hz|&Ii&fl{Td(MFe|pp*mCr0<#G;~e|&%=;sxh19)Xaqoha)y zH2SzTH$Zd68Njpf-qWU?La^4qFj?9awbr8lVcw_jN$0WVb}*eBL0&{6Bx(S`%UIn^oGXEfL~Uzn_{KP-{ho+89VPUuVBw({?vQ z{Usl51nffflxzX^18qLY*{r}Yu#6Hkp?M-qc$lJnw(3#3Y(GMi79`nEM7?HRdaOLQ z$)B)b{ECD$X*-b6l2`Oa#@|89UJRy4;K}{{)JnfM4*TZq2oOQ?N??J>lJsXV3AX_+Bb>pW#VG?2#N`mQ+1TJCfU?ko6#y|6go$DwE&IbD&x#68gr3K>#1I$ z62erAZt3ems z+3ZWGwBo0JrsL9zd`~x$r*-(4F+Id(=GD;~BrAgI@E-T<$u;ZZF z*6K6-tV4RIqnE?}V01q0o(#|5lTj`v1(+)u@7J4Iym1Y4Wim}VR4U#ZGbw|$qxJ)7 z=C%mU%eHOKYk-F!P$*`;AxJ`B7{y=#)QBK<3am49$%xO?m=MjNpOLlAyb^FjZVAMp zN=i}Sr3(fGmzym&qL`6@U$c5G1eZV^!xNHFkonYEGWoF zmdlh-q4=K?CQU%Z-B+t0+#`4?B9M#;+}<_+RMSiZh)H|2Nh;o!c!=BJQ9XQdj$GzF zEXm%qsqd$TtcX3b(RNyS+y>ah?rH(42|;-wv&r`BwM5`a+PH6-KN%{+=neq|7SdqG z&C?Q1&}~;?Gbg~tO2LdhE=@a&bS>^VZqQo(iP>E6a9*#ufb0{u@inYmGrE*y8@zdA zH3uvb!!hVpQ5wsXhE|tBb0m1!f`wh1|Spe%sph9?1)95so+qvsIBluC}o zqRx51<~oec32O6K9G^Q}-(4NL;RKH;c@;vIwa+w2H~7{b20SzmraVlXa?VCmG93JQ zhrApNCgi0$7uA;Z%vznZw%DkJoPh(}3zP!plMTkP%=Ek8rb8|!-B$zB@1A#aF5wvO4&K-4 zG7YR#4xP_qHixJbEr6$z&RM-Cl{_UA?rFCUUzdFeX|Vna!&b!^w(!XDr8=m92@?Po zi3ATu7Q5$qz{`$1Sr8=5ynI4K5w4f8Pmr|7)_ZFeq&8kjn%+P@Yk)Tbx zkOnD=l}fA;_qz@`eK#76C$EQRq$^29@Qrij1UkH}Mc`RSzeglYF^w4b6#|fiQFFNbeoTVNBt2x<>Chh(ZC&N*uG5 zP;J-R)jd;{0R@>`q-@n|fbmv_ZhT|4TY?W$F6uL2xlM4Q+bhz0xVbG_r14UZrr+At ztXyo@YZd4Q`>N0n`Pe;kxy#Gy=1N9a))shsTC=P+9bR+XowAv>Wpl+(*94P$X;NY` z5mCf~7TDmd&zU~2hX);UGJHEcAN0w)?s(iCY1=}7IO&~qhi?btiEej@Iii_Qj2@ZY zxH`dzEV;Xv?3PMQ(vgVyhZ5_X>Uj64qnVj1wUbn~nM$Qh7A$O=gw=%&RL~k$={26Y z=#Y$#Qo)*JuZxPOIFb5LJ_qc=H1oBuu#k6L#ni@n5GAVlYkZNRjtMxY*8^I)%tapQ z*CNAzBiu$%sx~PlY^KBvt<%Qpb|xq?MgjGT0UYqdEQMgN0D^fb6q|8wN{|C0%JclI zD4Lp^-Lcwt?Dc`t7BG*}{Q==_+5^`_p_mjkhCqsbO<>>HU^J*YPlcDRg8OQv_Ff1w zsvS3n_6zwz+JetZH0 zKQ9sGq5(r;mJBI4)Va@a$t~lk*MM{Aw1i-uQe#zOA=3>+UfM(n3`>b9)?sm z=ae|}+4%I^VSmsk-500d4qEk^PQ#@*_V9cHeo809!FWR6cHa!h=rrqk`jT`<@5!6t zsNW**UJuS+55{Eh$FuQZGHH?FTh*`Ms@Epx-Sdm{)A4(9Iwq&8)w|(j&?4i(Nf*); zouB4%`>1B;r#jJ5Lk&C}oexIcQBTe9Y<&81FqsTbN8J+(kW2>SZ->3XL|q`bfXUgQ 
zH+(tlbx%&-+q-81`HJ+^>NUNT;Q+E1^#_yTt5J)*9gKUgyQA~&i{Z)e{C$hO9G;H` zlL;WP3)b>s@8YC8CTADpvs1N{^Wpi)pk7l*8=a2Y!_mv}aP&$iiQ0%!^$i9^o{lFb zSCXC+IUUz)gj2U$0fYix_$5P|G{u7y^WJGSsA_e?L^q zJh@ObtA6ec#^>GPi1fQ}yRU#`okRL!Jm~gbBSB6E5E@IK)oXhND{oWnp!xlGWY4Nd z%@!fUky_~BTSYjq|AQeM5?q-g1~tQz0TT4+JsFPr!||YZ-Xguz(PZ$?7dTFf4E{KH zdv?+tzi;7rQr`_nJ(vg_KSiS_L&$ojmjew>E`U^k0!}CAp!BxXpRg`Pmq7dKCv~2O z=aUxcj)#-s=vBQ2#C$p~@CP@aTC2n_`lr1M^$To53)W7CFUjPh_qtxgb-x)6-T~HL zpI)5w$?NX71M+5c`fg;1`0R8t8REu~)0dE(tOrfUgYM*X)O~UC9(pMr1jrrqTQGv% zNxk1k4nY88soH2Y;yrA(0)QLan2_^RwJW`gw-*X?-|CIV6>0R69Ck6;TOnDUo}5j{ zR<9MXdz$gv>_oqKua8>`X-@GnwcG*bk?X!ay-=`U)@xwyI_nOB3!Qb}11UN=@hp0> zlyJETxZ~GruZG{MWsQfEfw@S4i>Xtf{&D_paPsYd>>VC8$=lP>`D?|;PfkwXnS^?S zmoF7+zXj8bI@MVkL4}eD@>6>oZDQZBXdtT5X~qFy!RD4E7o+}Qtft?ckgno97boXc zX$AGvdtGFtX6$-xIO-4Hj^J8DOuRT5zCy;+%BGkRfHM&sJ3msrhK&A2CK#Bz_~NZO z7A9OddtQvqvU}YNeM9%qnQ=2OczZflS8aDhPDcZHPIy1)j!F0B`2f%8 zX|H!N9uI8V%R#+{V^?gjB}Q{~9w`9;-Qdq}vCBbdS1xs<&J*uC1%Oo_A_*9{KG-Zx z0lkr~4Xi4YstH0suZeoCn@=Iu^)iQkCvTDyC8krsm)sPX28fDVBJ za=I>rtLb!XE)cXAn^$Nw)nk%F8zS`evs9g3TP`E!=7Cij}8*Sg@5lX|!6A=+3!AQ0Q8rgM{f zwlFd+6bXc8<=*29!0cm%8~cn8YjeJ0Isv49FT__xabqg{g54zZlG4uP>%U9H-}!Wn zSzFxPEJz8EFR)(o8AF}Wc*$bb6xB&-`4YYl!8H24;9){mECgKcqd$)z#zi&^gbhFZgndoa8ycyDvbFvzYq2irHA zkubplJTT&8oT&vD6Rd@WmUt(5p&QDjGv$E4v93bnV$f`du_c)2h(h{-!^fQ?(kmp9 zIiWWY#H@%?Cq%4t?|=vgYmctikj0ePfrx#0S7fpnoyy}}0Icf?`Oale#`NJB*E7#R zV)!(BBl^HG1!n}D0}&smpY5G>dHN|PhK=Z&EJeuHEfVI=Q)V5~2C!T{Q8dXKYyFJ!AVps?pJ65@?4^&0yS1wv|)*3Of!RD&BD zxAM*a)YDG{G)@+OPU*GIAZTCJ>NSt1a1yn9bmr9Wnx75BG){WxBBXKtM8^rL{Xd~M zzbD_aP!R7P*Q!kmDpq5RUrKenoa99 z?K$BYpIit~W@v)eCep1Q)l={sNEChP7mmOZ^bBKE6<7#iK+hCO0^yl}h6xQ5N_qi} zDd{PKUbpHsXP3-U4TAM6p}NLd_TqrPP&CNpFonJ=MO5$_6fxA09x_YMSy zf%0$`^Kfq96#es2ObRp4nig83t=NotbWCaw%j6y2S_NihE$W0rsOEAeLb9L1;3Ct(|LQs=v~yhmNZdZ4ZSSv3lDgU)oXSZ zAg^VTsFm4(Om|OVXYmdpLzgG$%rtI})1nHvU;&^I9^lhB$=nwq8I|Js zkPm$sG4OoJT>z+RrJ=?grZ?1%p>|kvk?BU(FT-T+b2A@FOn!p+E~pI_gWCIg%jqx1 
z%FNOrAZBy@dJSV|Zso9f28(>jE{jV{i7lmem{f487*VodYlDP24@t=8BH>z0K_>;N z`JvL^!E*pB`kArJW`Bb>IE&R#a^um_iArq>a$TMmpjpUBvG zrJt_rwHft*pR;BwlNgmU4jqYz`J9I|$cpOOV0&Y&v8)eLewy6{-}ORb>c^emkr%pJ z(IDHqYN^$fHG=S&?ERdFX`H#pPFcui+$kJjLq}XeC=o5Kf;tVdG!@!2u#hX%p3TDp zlBEJwtcVq2MYBr^V~)9qEuS+2SXIcuo{L8biQy}NpYd={XCsQSiFZJO!oP}o8UsM(!tKOC33%G|L_7SCC% z?o1KSX~_R#!|^0)8F(6Ew#xDOqCGD_o^h*TOgyLLfjE4F*h0U%r}1 zwV6m(!D;Le4h+QnbJslk^2t8ehX?x*oxMM!EAEM~6NU489^;n!kB^@m9u?~UJw7~m z@}>UYLwx?1v(c-1?SD{bvOav>AFGw9|NGDX|3{A=z57r9?|*;v=+WQE@v9e){*V9b zfBe56J$iJ;{y3byef#LQj~+ex&yW7o-~5+Hf2*22`kP0O{yTs4|AW6DJ^BX#{S8xZ%>XrvFuP^O#eUj-UUvQtf~_p zWPv4rpzG>_xQN#*V7jp~GTx75dZaq5s=K?sU0uNkzEA;qe82mMN4~16XL^QaCK9GA z^FGc!_uO;O`Ja1l+(5!XFQlaIIA|R7!XAntIOuskZXBGc(h?0HH%M~53TTmd_e}N5 z(h6t>5dxZ^3S#xn3IO1a$Z0oLXXknK$Z4OccH=nYm-6f@LJ)Q8l4V(SQLf8!Eppm5 z-1lR6Q1kH=Ba+gYMfp7#uNqnYSt$@OZH59w;yd@v=ZrMSvPIBs!Al5V5ggi-Y zT#DQ@I=WN3qdt1w3;X^NY?`<=!_xHa02fwcRZ{GKT-@pACpL*Tz zJ@t>?{*-?|^L@9U`sCxKZ#w_J%Ikmf$=ALA{#AGWxATvyzv#Pv|0&5w-t%pr`$F&M z|LWcEdBNM>^?w$h`)d5f-?i`;@E1;h|9<~ze=C0sz5QJ;`oYK4ANzvG|IB-Cd&B#F z!fpJ@_rKT>U(&t$l+90DAO6VGKBr&a`h|b`*oR)f>3;Q}e(1Howf}|}zj3Ehd&{4{ z`{JdSzV+fW-|*R&zol(H{`a1J@Q1VV*SzU_&MyAr)H9y=nD=Y{@ii}h$rsvJFaN@~ zFK+*r_x#h-U)%re&TBvX#j_v(motC-vF0oAr=E1%r;RUO{LZiW!~gxhpM2df{_Xet z(|Zq|dF}bX*#GLkd(Urv^*8?8w|xGGzy3|H{%Pat<39F3e(O^|a2?#A{K88<^Y>S* zOWWV_*57{CkAKq?diQ_V!k-Cd*?LF~F@BhrFAN#>?c-a-O{qm>(zpwiHSN`3b z{`LR8|06%P;QrcU{`05*@)du7_t&}q{Gyk9&&%Hbfe*Bw^ny3s9qxVT>EH3&!RMsQ zaN|{<{Ijo6KGB~4koThZOyR@X{)>L(1K;&4fAO?`{ZpT{zUMVx`)=z+@A%7iZN5(X z<(I$t+h6}HKVg3UU!MENq4wg8m_iI)@D!(#(?mPeHKfmnsx_kQi6QBNx zZ+`Rd-Ti$(@fG6t{nO9=^f&(U+U(x1x4-|R!dJcN^qs9YJnJ_f|3A)HXD@u^bDy=c zdDrXT{=MJy!=HWUb8b8H($C-dNAJ4g{M@g<@L#>IZ}`*?yz$5G_=dN? 
zxcSDf3)=f1`=Ie>U-`Bt|Bi6m>z)}gE-uqSm{_j8anfHA5iBC8) z^?vd7PkqP!kN?f|>}{WZ*%$u9x6Lm+=du6vr2Aj~OTY2H#*hC({iD^nZ+iMafBHjj zd-dy{`}9w}bmemQU!T3}W1snY=Ubor10VR4Z+$m->-WF&dHQ31^vpk;dP?KAUw_p{ zf8qTX?mQE`;q|}y6?Z@NRd0WR^32bNFZdVdFK++jJ5Il=@yd6e-F(c)m%PR3soNj_ z(O*Bf^93)&cV9g953lHc$~pC1*y;aL?O&L%Flh(<6o`)>gsgA_O@sJmp8rjV~5v1@s2-pKJl10JmJ-^_~ie5O9ix_`Man7 z^6{T@pY*-=`ak`xQ=jWT{gaiyn0fsZo^t(HfB)GpddK}wc>c5h=<(L$!u##D{|Wx_ z8Bh7*@BH7}e(#s>f8(oO_B*eA)|uaO`)A&`^5=Jb!`uGr6Yh%N{w(p|KJi08d-+FR z`G=o>@~>+zd)@c`?RWpf<{iza&Ht}&`R0p%_)}jvcw>0~>HiORuD!qipuV_)-KFaF6+`0Xz=-}cg#@BHNHx4v`kSLUB|#~Z%mldpMErD?wP z!nyd(&wAz0DA99X_e1{;|IN1;&$*D?_S)Z9|L#Y=<&AT%-+%18KiL1t@A~-<-lg_l z^WO0OKj=O2dG+?<)8Ml|`B?LtUh=Ll{MQ%$!#fsV{X?&8{^FnB_Ya@{-PzCH_P5{o z9Upq!*S<#i`SUN;|NO`OH~rY3-hKLa{^s((`;CA8@n?MM4>s=qgKzsccZgruf5Eri zUy3d}s$-<^o9 zXf6djcQ~kx@4TZ<|B&Au;5uijE3kuVQWfAh3U<+%svr0${Z-p{ow$3ZDpf(-^}IBb ze6K}`ht5>d9`Xan;fRWpv;!}Qc8FrNiG6~HREBMiKLC-6ZSxPFY!R85WWs0tP^b@ELY3}7mP2~<;% z$Tx|kS>&e-B+(EQlBfVlH(5DJH%LL9r3o4^HNgOqZV5V&G@aC9-z`bdfFzP4l4%Oo z(07fOQPS_4&N-u!U~CU|B^`QNbeeWzk&>7uBs8!nt0Mo}lod-brzKGqG$1RcU`<LpzL?co)&}Yo1elj%}?zue81WXz<((k%NCxIjwps6#xRDf=< zFM)FuBi#}#{-epd%YH~^s@p^~#5NI8embgrd2@&O-&cHXxVAEOW(|2ptB#kgsR)K{nuerLOR6qNimGS|XquW}8oI2j zB-s=+Q57uAhxX@baGvWP%rLy`4?); z&<913bV)QV)f(u8C>Vr*tQZ4=sA+<3s){O^7OF{VvDMM6yjR0{ilCdiY)F<4nj`fD zMbb4{GeD6CQP5;vmqghF1&#};i-rCblADq$sHUmNx>-yv3NCA}kXT@FU`Qmvv=r4+ zOlp`cQPp)znbt%>lnu+!b!wwTNfvdPOkzRTOj!{XplgDtYldc;)2c3*x~WL2Ma-CF zD3T@(q%InkU`mE2T1GLsX-a~{hqaJw%7V(6Q%yltO-a=ZV2XliP%etREh?g*X@+E( z>a?W_hH06a$R_)k`LTVB!Il(3Q^`!3)=WXNL{-s<>yaeE(ly1H)?`9Sl?($kNvf)t znk*Ih$;+yyU>T~Rku+6M6kRfDqGYP3D(ZxQE*PRAifU1+lA;Kju1l&(+>mOSnr`ZV zsH-HJk|7mSo06#rqG_3?GEmr(1x2(B*__raK@)XLmj+TTS+G>ykW^~_v*dwUSWM1m zA#oOxrE1#1EHo`aWg}CZg@id>(hX%Gxha{lATntT(2xX~iN4sLDGG*UiMl>$Pz+13 zWYe(JX-SaOJ6pM`dv_zbpq=~WxBwZ3@S<_8vTGAvz*A+vT04bp8l4@AR z)TSg^L<>b?{tAVO2U0Y{G6x73QYDj^Cs8DmGf%d3K@vF$SrjBTElf?&3{^5j4M+ra 
zSc+n5#nh&xX@YJkhM^7=)(t^6RaMcaO;b<}QIib{fHcjp41zvZf2Bp;(eA16h#-NfR|wEt)x5Qw38q4NV)ciL$N< zilrIyKv}~Q6zV33t(6o>mw{|4f+Cr^VN7e9V9KIyS_Wv!7IEH|YNO$8)X zm5H8frl4wyp-K{vEmcsM#^|CTTe@i&U{SUt!B9*|k*A4^RCF@5gsw7pXZKsF6Q)+|ewr*&OWG}*L>QI-u|P&G?al;RAONnocDqeWzOi7d^W7-l0lUMEz)!L-Cl%0=rfYsAmWhO7>ldRf&3OOzD?VGD)H%;!EtH3dUA zR6>~GB~F+KP?t=Ra3LvzOw6=0Es;=F)nrov1F5>8YnnnvNg^OzHFaWLR6#a1U6#P2 zEE$5TkqI^}$(A6SvMJMfKzyJs%lb62Y?7fXlrk|^R4pV~7bKbKu`CG|Qz25;V&bIZ z!~sy=o~+7(p-QrrN6D*P_6;qqCK|Fx0n}dZ5c7ima%etn4@kZqsOQh)3 z)X9cEFfxiN2|5c`@>E5U1f5JOh4#mkWLc31Qbk>`M9DM_uvkdeEaDe*P1nGpB1?i! zF=R2hsYsTfh@xUz#bkvH0n^>WlrN_0f@mt1rJ7)|m|OtPg=Ezb6ic;aO$LSb3ZS>Z zToE`Ix-ubI)&)^FWYIK1vzV+1x@D-ktOG@n1(~5oSr#nO&{TuSP8Bq23yP_Wic0Yd z8OcI&Q=!v^YGjdystY=?3+6z3h18t6#X|BxS4?7usrD5^1I5$?mEd}HKuV@A$fFX? zQwL&$LUKM>&0=z4WKClCX;4+vf}FVlP0G76A(^ghnVP~BI#13gxuPk8$)i-o5G+G7 zEd?kf05=R%mWruO#n1$mxq1m?*TO*(b0E<)BvCa6=&Cx2 z{A8VshhYgi5%J)JCvc1*=3?qtlW~ll(L`C0WZhIv8ZTJ7B@&OUiHe}6m@$*oK&mE~ zx~6I*wLl`1bWM@6G;ThoUf<|vZU#f1~ie*Xq9MEp1P>Y zvY?T%Rf@??RaOL(VHSz_I=b4Ss*)fZ1pZN%EGwF1nN-LcLr8_xrfNunMnjomSrVUe z2dN37p=pW%R7(*ghGQrYmkA;x^3epD&q`SrEJ+a!QkYmb7A?v;fm?KgQ zOt6V;i6&SauY}uRNwrjgu8b(fz`RK~A%K3`PzA#O5))WY+4x8LTXc0EJ0Ny zOEd-w%c>x;kuDM@)?3z9%^Fx77|+^qvrUr)i3KIPK>&my6GhiFT~Ha`Du7o_*9D8% zDpe)pBI}9{bu>?`l6;&Q+ub7$(w2*9Rf~lqr=E}117co2!^ynj*CMN_8Y*Zx6;>IUSRJQTa!fbS;w|U+5(cs_@bv@8bA?` zV1Twg+(Zu^)Y3Fr8?4+#kUkZ2@1VS7ag4k)Ca)fhI<8-91#uko8eAS(avX#WAQqbl zp^v;98fA_5dSsZj(*xpBh9uF7{aSi?^)U?&sY=iFYh1pvb_CT%my8#sol>wYYq)eR zjNBfKCQd2dXp3GW-vEB#qash7#Uf847jo8horp*tWi)NNz~~qTbX^~5aEK0n=4TQG zdA@0xb7QzZN{T7bqnT5!Ic3Hy)@=t-uNDRSgEK3eQcUxh(gK*o!AM=#50jX+M`d1% zpyMXE0aPZ|bR?wk0N}uL9Wd2wHis3{^)ZTx1t78?qn)Cm4VWP=06uZlMI#wqp4?`}FMmw+*JUu1@vPTmO;>vq8Ge2ihvz5ez#Z-BR z8t`C?Px<3)uwJw&bUgBY1?)ZP4Sw0p9-SmHI*4n~b31+mc&Hs8)mhK=cWd-&`Fn>9 zl*$Tzson3F{6p{`zohITYvUOFC#jk=g#RR2(n|dIaD1*Rn}^OhE8f0x;nLMD$-Cs9 zJ#@~^_nmW_{l%?=@T$C-*z(53?Q`?u_QqziJsWf`94;Iz&CbM2vvcv%?EJ>k?2OL; zOO|G5_P1P{mET+zuU@$zUUFyrrRy{OrKY&Qv?lH^ZC>7Anhn^0O?7{1_AI=xANIDl 
z=0)eq1&|S@Sv#EDZ~7NJTUqVHtz{8zS&4lZ zTwG|LOjdw4U)LZ(q4^Na|m5&&o?{ z>i**OnTxjU`K=!7!zK6ZJ?ql4wmiFhdHbBUy)?hs+;-zl=yfkS8&a?_H^0`BF38)v z;o;VtcX`Y2p52;XZo+fn#n#6D!Lqj;IIg+3dD!W1%F9dW-1dbfSzF#-pApyByYcFV zsG#-kitW!tmv^y*b`L@{?FlPHEDj+p1*pqHg{orQ`&~hJu7kMF3h4iDcanMcj4^jJvxn=O-F~Zgu;HBRcA>m_eRFeHgon%h&GQ##p|Uw^ z`|}%1yW#aEDUe`q{v7nS*S5E$ZAV%@yLlztUGO<@50vX>L!}o>|WklP%mEz*Ozyf`j=(t z%H>0^yS1@8zdqZM;M{zOwz`*=uWamZi1B6na7Nj_a#mcGHqT$}*^0dtuWW2Ci;lS5 zwP)uqZfvaXwPrVG*Tp$&<#KT5k?JXk{im-4bpsy@ec1DEej7*Ie_7JB;pcy4%_yJ$ zcvwE!*f%Ou;LL*T9VLuvg-B5_W~dQPnC_Q&V7OYi+1TrAA*O3tJv) z0F3Mi#aMv*c!tIX7g2w~A*eZjthE8EgbI7(AlOBHurLcwWv{kt2N7+>L7^AW7t5cn z5}?=*Q3He!V%Lf$DHN3P!~bZI>G2h}OKk6E_BcqHALO;vHYduax}R|BN4+>V1?lHB+=YpW)A<*=KWZ1@x)zS_v zKNGs+=GtmHsTSJ6_7WVU2)LL}E%wTvVtaY7dx;mjB(lvrz8v_q^vanQ#7I{G^6kJu z4mj6b0c$AQb8SQ>A-&?0%icfM(Ol&h91KhnQak?PPahUQAjj@==U%I(?2XZv%vHW{1|2R@MVL zvO^}VkWKX4^+7*KQq5+1Kbh^J+7HD*EOnwe-=PmjP9BaP^kIj;QI!|YhJou?4DDU^ zhCAOr%Fi4@@@95#W_Dt)7^NBhLO^~ac^`N|$MrK_jsO5hSG>8MNX0cqvnF&SN(H0+Ipqam z`l39W*2~+=Er=un2jXrkfKkD}wt~Pz&@aFWcKP&KN^6MsBPBCXlr(_5!Tt_TT4_wP zP&mp@$BJ)B+?JMbvWA_oQ*%hJeoz1S_Nh1g90P;IwDR3zu zU3v*!%0jUVnSvKw8zo3t7}YB}K~Aby3~v*t<=iCgTBj7xQi=~k$42`VxXy{E3w;yi z!YrB1KC9)V^g+(1Of2cX>Bj2f7{%OJUCcUsX<@brR(SEFRc)XsV5BFr z5Fz$vjN@4#GVj0`?Zf_!aI(r9IF=J^2O+{^m{<#;jS2>Yl4YgA0A?rV$MxHR0Ok&0 zFC z?Lu5+jQZFjYaCaSDWATfHp`;jx)I(pjwUlteZVAHX4nDP366u_u%}^iCgY6h0#lz} zmW)xvTnjz{ue9I^giOm#l<U#H3xhIh-k9cK#eec z6k-@I{9rJ{aHpZ~2mIv#~L`fJ15rGn^MzHp-ML`lHO{w)@7^3JJaaGq+ z!ZpXo$KVIvHdrT)a*!4wRit#>5bxu)wZ$fi;+fUuanpHqIbQ|YS_3o9C9t-(NMFt3 zwuuAg=9Ay5UzGA7Y8xE=!cN06Gu zwWbF#E-X*}2X8e_y{B1^I6vPY&vR$@xWfLE< z&pW(jHgP906Zv3;sc>S0%uhg$h?djg{14%B_Tq>mcMj9~Yia+}{_u?IAoNQ- zC_vjr;pn>hRu{3?=mdaG4ZsS5xsAmc&_eAXLTrb?^RgwJqfN#!1jcduAP^qun;rB* z==aA3lq`qpr~tCf+qR252e@sDMv=$NI*l-f8DMfjjAC1bhpV`#^0CFN$Y!N|?g z=DyS?ea3%wC9xhyF7zJ4L%_$L|22ls|H`tal;^)5j?b9$zoOmqx~=oTdUufF>rP+o3#r2s3^55p{%>Je8 z?7zk9Gy6;Pf#2+HdsnZlN_KDa@KPt}ESz8M!>xII$vvAmTL-w=^L8^Ho8tbZt*h$N 
z;heVZiU*fw&-NDASG$+iXW~oibIMX*+P$=X!CgMw?k%l%4wtt!ZumU#^7_nv(_i*% ze>=P?Z=Ss>FYh^9nz$x!Y8g-dwrBgxVM|u^h4WVr7O&4FOEcji+;S4umkoVk?&?9y z-(1|7-_+LDFT}0c3+I-4o6_ca=iH?$%iF8#+e@1pT69_2c30(ieN)=rT^G0SapqQ= zE$OVjc73D2xiybBcjtSn*Ef5YHx5F(r=7JoySw(>s(;nBuG)Thur_x!TAvdS*5q(* zY16r~G4F`m8_t#Gd0T@1*_E~VWpQ1$^sAz+Es5dA#`$?|N%r*h&0S@+XCH2F&M#hF z-#*-$?Vfe~`Ta{*W}>Ytn`f7{miO1^mc7fegI8t8zqG!2xVAChyV_e_T3SEb+ms!7 zwYREY-iTFaV|#H~4)N;YX52a_HCH$1wNL1kHaGUS{nfM1m1VIdEh($=g^P}KHd&uvP}bJxx35Z<)k|CE{)T+8x;=M6zIweQ zUp{m$wtCVf$G})NcXqcAMXT94^N2YQOzgkig;9*_od~v}59>|x^JX3eKFef(Y(OP()XK=?i>aoIuY#q$S)Q zU5`uOkk&(SX>bJ%s!-WZmy#BQe_1;tie?|E?$kw9f1 zdNGOw2W7p^o1Fpc<-F+W&{+GX*x&9kKNF|86_A0)y)Td`XUn`xRc3?Y!hGnZSWD zWY(jSr!G_DlK-yp#mPK{?W%38E;c}&C}o~gsZ6m0NgfE3C=4(HZVz_YvasvB+4&_D zRg#w5r~sDFP7FkdK1Liw{RW6o$Hj5f7uZu6LT+~JNy|0KKee9gyFJKO2V&SEZcwYq zswhahYO0zkS~ATG6VF=-JlF0wz(RXDh*#Jvtt*vXNzB?$TWo-i9SLqw8DxmRbB7kk zl}gltHmSvyU$b?UsVT65(ZK1gCR=P<4Lnq*PZykWP>9(3O#>egLPECQ=dk6pIw5zO z&;toZbDR{Z-JlMk7PMU*vz+5sn*` zDNx*bq`v_`yI6odco_I_A5)bg(yPJ9ZvgTS+{XRoXINR`UeRJvz-K8V~yqW|7X;LId(fNkiZ2n6hP>12-N zNBrqhRb{FIc{$hbkgjDjn~fRiPtWyN3k}jFcu+_ZSsH_MOa?W+5|*B;WrE4%&jkfi z_+f`hmco{(XZBD8J7^w_?G z92Z9k1;4Gt>7ZD^A+oPgsrG^hk*X1D=ZJMTJ2o%4LEQ;#T#q5%#q}uhYXy8%x6}Rm zTIjUul`wE-v!>3{rW)Yh`$z|Ru0OK}U5~9_(xK?VgAE@R6DRf|AA8EWfa3_psMEi( zKG=b8qcFzx{UF-)0_e~r;w?NB;dQF|*zwjJpj`hoM{VBIw0kmKsDcy$46ER6KVNJ&5{%dJq?Sv&e(}HDm|AgQ;2o z8ovZdoFytGk-z?by&ECi4LpZVg-ULsDVX-j%*1vVIf;j&X@G=|0IE0+qTQ-+a;2f$ zGA`Yr;U>ADH*~up^iVBp2iGf8Q~T{fFuwE0CyqZ z^PKGbsJFUA)tJ79njZ?Hl8GYN2SJ;8=at0s*7$jRR*U}vy#&*DkoTaUP#Wp8DSSIC zb27Z%$z)lWaDh|H5JR1Gg*o6_7`b~eMzz#w*C~DKF~XeN26rJf(Ce?Vk{3~bEkA!< zi4l(HBiKV^wv4f_DLw(!7L0M7{8LS3UEPz2w5w^Ml}VBY03$nz+_>K)aq$7O7nR{e zGQx>JgEugW8bCGVG~pcE+hHl~;?GJZoV8J6qz&eTsXTiyq)w{ngSmA_QH*PLT8ay; zM+&d7Nr_@Ri#ag%>bwX}TC70fCG4GpzPf&=<_Cle6yamKP^?tXD-obP6NX-&VHdVHgc;uM^#Y$K2#m2s z`hyMS)D(Z=KY(B*asxUN+4+VzApQvxw2o0+nPM;eFBWN3Do(KP6LUSA9wf&L|MSb@DaUMME%@6HolnrkLjM(az9s^HI=a^CUYuks&kQ;g!HC9K^z;TY*zZ7%0eu 
z#mW=~7Y5Es5XH*_3mT--Yz~CrKqGKW3v&lF)F2io4TsYGb-n>kgNSrDI7njbI;?!U z6@F?lfX-RyLElDETKhD006~;K3hu^?z`h%IxrNK>bELbll2-<6^r;P$Gt&>*(A8g(QoG#;>E927G*p0QU+eX2?QCPiB-MJG$9 zPBB@PObM(Hoq>g9;vMjb(#&EyFfu%Rhc*2XW`oT+&RAaxe4^qF<|wiwAuv6F!1F3O@MC%L% zk)gzN&Qgl5-(hx>8&o=3$k=jgnj6zy)VI9=?pCPvOx1)a$`lyXl|t%}dR3(lngXcPG4cOr!GSm!FcdIjvfbsKK5KX%;_t zzzSrBowh;H{&>*S@`6rX7G<$6s&!GWyFKEX=s6`^i-TI+MYXg|7`iprujK@4gwy&| zewLdSw@K5rpk0H0T9q;}%vl5Y9oIjgd3<%qhCZ3)G3tdLnOtM1GbumV2~Js{E$3uM z3&tLHE(U?`v2)V9W;#S?c=>gze(5=HrV;!Blp338 zHN+mj5xl^ri$&C75dTioO<64JP(#>GM&;CD^1)Fsi}g(P)D2 zqg-%AZr1pwM?`rvM|naT#gFBV+mOQVwD=IK6=u@%Ri7HjpOS10j$_Eh$e z(U>8x41$*_06e$n#_4j99VYbt)gJrF)*~7~lFcQTBt?iGl{!&z;=`8%2c_7NF1Q8` zfD|3FlgG!0K?OJO2M(&SCzRa4uUDojBxQ{z1W|^7)&q}U+44Z!%+?JC)V@v}dt8IQ zQ;V}EDCEe0a_2%YaJx-H^#09dff1pxAnAL33i4(sThNc)TAD%_OTRbr0BFH?;|!$a z?52n7e6YrCd=^d-CpCy|3g;|)2>KMdLC*{J0lU(ZhRuVIoH2+*%4N{u&7@nFHHSX_aOOD~p&;0G(J&!0-Zkhr#6_Ip z$W|2S{~82Zrsa|l#}R_PS`v9_IY~BXNvT1xX$nfxnK&r4+CFM!tBBb^<`|X^IXg*= z`Loo_-=&Wx4DL_xGW0mIAFx#p5>B$C>V-hMxs=!_ zVrjcR^xQ*wT7Xd)>{7UA*Z9e3?l$=&p+U4*geOz^dPkK3&~%T73a(MfLYJbjT};)7 z_j;N|0Mu|$=s|~-07zDuI;>P6!1Zg)y^xX|8>W|aLDqR1u(45|j>VeOl7w93!O?n_*Y61Hc__ngT1mDulwxq|O%4?x`A0YylR6d1qA>sM(1}I-jh!Q`AdjLi)H;!P` zr%zFigpX_)i&7t8w&-Svk!KaBvc1r$;my!ig8QxDfMu|?M@dj5L8@{C!WVs}j^6DY z!%23xNeXi4XtA+j*vTr< z`=={(d7mxJ(yx^%_IHIpAx+Pe z2Yx4eV!ATA5zNV^k7uw4If(oOMxI8fyzs!<8!2-U^?B1Zu4`#ig`7g`Q(zXgT_4e< zOUPoI76Rx1*$MGsx%z`vKZ7Uv?mWS<>~Rve%|&#LUJqA8{@yiex#%M`=|w%@)Bw6` zLH`iCwihH0U38&=uIpb%c8WA`&GqAepgLTmgzKj{*Yxr>7lUe)_yX=$0mQ^1(5E?E zKjrmy3}yp+7e(~`wyCtA4T`0zxLajk>YT_ez3;;pF-c5{{5>}cC}hqag(yOi7NP?W z0)Gq&A*84HgFXcOW#h^W7F)!O19p$z7L4sKaRT&y!Ha7RQdbhP2g-cl!XCRZGHA1< z%jy=;2_Rm}3vx2oM~+wldS4W48*8E+W7{ol}fyL2!l#&^b7`*HN8&`?&Dn0Nv}o5+jp26lk%r|&qy0o=0yE>Rxd5i z?aZt$Kaleu8Ii5IvpWm3bIa=s>sNQyFI}8l-kD!moNIvk9*l?`s&mU%A3cCFI7)ST zt5SizxpNDbmKz|`3f>7#0EtfwA7U7iOk-k@Y4n*!u*{XD+h^3Dk-GYD-NN!!fkZ)p z({ncfwt_v{?;hMGur8w8Ea`aNumlUE$L^<%{7NO=jUvrdDdwDyf?jp3eI%wDMFedR 
zSxV?QlkJ-cG7P^<<+x4tI{l2M;R{@xyp7{aOTNj00F)L2En!vSp$_y|M> z5OdwjwGaS75Yn${h>>ff0r0+5NJk*5K02Sel$ZH--?a&=~D@lGPb&8$YYP{g+* z(x*@zR*ECB@Wvl-*9)DRh_+%Sf-`n+7Ph z1%9y6#rfK^P#N8YWG82Q?2<7*fuk&n#7FySZ`pCH8{m(y!Ui*Ch6)obP&`xIK1f%5 zNsD6{6e>q;2Mn%)rNnAJiupn_$@B?^Dx6PZ@ri3As)=cnwX8{g7?f|LExP_L&d!Ar zfb6(zpMGHLl2epZ!SdMrO)qSGAZJ6y*r}mye!dPuUw=1YbDuMi z9!)4LqVSH@#t<1gb1-zQfra@rHNEhgSlCk(*X|RKc6yBaqHzO34fITIl;q}^*feT9 zPY-ks?&BIUi_Gee_U=aA$5DL?qkt$aDy-@A#g?VOK@FhFV!qg)K3I^i-`9qm0llnm zaDg8Hm^dz7`2jc{;&iAaItX31%A1|sGIdoHhl;VyicCD&fHc<@7;a}*Z(c-we%pQG}$v77bYiQGLBAP$fhZ0riVCT0hUd1OCkT=?SP zOdLmUD~SiB&IqQr+%?%YTNUo^)iTWv?Q}~N_<@5~l9uPv;<$ltchdh zV9$^1EL%KkdJV1!rYpWSzb|V-d(x5ZNk_HU4RBf`!BHJ|VT7DIJ9b0DL>hy6Nyqgu z1?&5e-ZeoV6=05$#8sFcKB@461>rbuCvxp>t(CM|9%6fI^c8_TKOKGB3+!E&TccyXTx0N~=;fg4Q*n90MZL1uC%bR_M> zb;gw?;Ws;xoF60y09?lbUr(LNt$DgNoUY1eSv+ACb*NnP@MKU2@)LFsYUH7ZqL`oY zW6-B&ccJgogJn~dtd(?=H%ln$L0)Q5%4|z3%^j39yMv-APnsg7NmBs1aTi6I(1NJ5 zLy8qj1W_@QwYfyV^rfgHCZg}zUkr2If&wpj9@Cu@4bkVmZ7h%3LAVM7XSJhzpd%*-p)??`!|(js9{CCg2H zA@!13YQ{7l@X>h>7y`=c<_d5-m;CLOY?v0Dv<@Ihg1&X~_B5ZV27h@36|#%TgqkI? zrp<9PJ#Mz~CI+ud%?`j!7Rs^X#`cMVh=w=Im)7SRV4a;q%ZX4&Ku3UrR2ClOtCnC? 
z$|Lb|ssIFgj*M}kr4U7#yr(G}*(#jR7u|MIUl_J)drBJ$7JU&q}l-0S`D1zBT}nX$CYQY zt}@*iSD@=UM`gQq;5P#=h)8qkD@|odng+6_O#|ul*jDKsstr&r9L@#L1~bH*;;|Lj zNxxCiWdiKEBv%r}F|`4x_K2sM@%vS-@8e6wutSFDuDd5>_e09{z|Go2pZmsge{9A3 z1{3bCOe9%NXYRN`sdju6FF4f(@DtA)%8FpvT@L~;cEf4|3<|31IwKlYbz^>>hGglg zW=OKGYq~sC)guA?sMx9$eBn{O9vQ-_9e5rLF>)pj&!Aq9bdGn>byk8g^NZ$oC(W4s zY6HmXxSWF#G4TwdM&ig2j&j6=YK~^)q8U}SYV5`yn;nHEmC;#|X;l>L59!k|E29!0 zG!r?x9McRvM#?Pw1kt7kj3-V{n;VF+xny<*)W%ff^tmWJW@e3_TXb%RA;BU?%v?X9 z>v^ONah!bt18pDAC~V=tOJX#h764Toqi~Gs=bs5JfTFmnedKw;{-j1|**)GbG9@Mz z8!xvLbJ2+q@=wHsG?9bx8F!Bz`Zr+hlj^hIe~pJCfNt=hRwr>C?=VQF{Y=Q zp0p>@mwIF7B*$qJxj{qCc0Vl#lQF1_>A}K=zwC9iJ1dZ zJAp>9^$s2$kcy@t3FbK4Pw~(M35^#iI-oll(a{hH9fWk!)R267@dN2 z*?5P&^lu0G4sv$LB!r%KFF$E;=K0lU@~_XYP6Hgn7@c{3mA<$iImmf_^}f5Fxw8{N zzk}{ReR?c7%rWN?Y!+w81R@^lAm5n}q8^NoC>EhM1x(em^M9a7pIR z+4~PMyfEc3Z(ySHI@&yDrNpE2sg^ltl9ewM<7S{*8{0qjncxAB za?Y79WOqlOlddPX94SgGVUWtSl5}Kz{DPd3vgGv}Q{&Q==psBf;v!-H|P-rBB%#YE9 z-W!HO6J5Fe1&-`iTDycnB@DV{U{JaJH4y~eghd~{ z1VLlEP=cUG4hSkYz|sQ4V5r;yo7kaBy`Ilp>hZ8C^RIGeZu0#&O?`{fkr-4Yf&#XyWr^ zCr3n9iHOE@p+rQFI7C!#tEB}dAfs}3ZDP05+9fP1VbLuEi^|=#2lRZ|iJ_4^<}R`l z8kNxK%LN*hdu%s`f6C|0CU+~XT>_vI0NpYGsN5@iAkUkf9RG}N-k12N#6Pz#{z(rP zmAhp_1a2G?mCv7*uxLz135&*8zNKJMxm`996y1bHA-M!aW4cg+qDLMmDmTp10+YZ| zxnnl5TWReQ8I{QBmO)14j@biy9_{4lsJxn1S+3T1F4mi^pl!N zWz5R@$sSN+dzE1BAPmO7{jYpZEv@~CeNOFG08)>howw0PNgw!I&Wsb=hw|jn=%<8H zw=RrA)E|`_ZYhD211j#x%YC*ZyOq{1SCh)sq+13AJzDnJCPJVG@S3v|Lm;_)E^YKv zLZC+w1S+@J(gH(JsN7YX*sZj7iGxZUbj#qNN7Js_jbow7S6-hS3zfGPjebfjbjxF* zax?8@fTw)$Y;w2K+U0gmxt()dm4{?I=ZF_~o!o(s9>yshcK{GnauKOC|WO`46qW*e{QK0Vxy)EeN9*1LR9&#MJ5_qoN z2dl(1e(9F&9E9!24b$0O03Ziaa$sI*+Y0G{k%$fwVZh=GKk1)9P$bt`3wr$&v{ooPA&2!(aevH1n{P)-zH|D=bNjwDJ z8Ab?xnR(~sAYM*9uXNH6)Jexta*U6*VGO;Xvl9|laE$zTXD{%Q9@=qSyt{)#XrrA3 zBWH)s%<*3Ok>_mFn+ch8z$O(5*ubSPeaL-j57N`2;`o(?YR~m2E<1d=`r(uBUECUR zydgeBJOmW$pNI$Zq2l4BC$_E04BR|86r#vR{b?5RKVMIvNl?0x<{f z2K(!&v08*Jc4T2v*gaT$2}Zjp!jlm|j%dD|z=|Y_waI{`9U;U43!gS6S^;ZB9!Fr7 
z4oo`33+N?i8LC|_0F(;=M?AaA|r;f=YQw_E|ZmAvsIcLm^j(86W-UnGz@)C)li zm$84IU&j8$f@SP~Q+X(3f8NNKbL=loyruUBPBWCje;NE23+2K81g!F?kNk&$N*VTp zGVC9zT!#G*&;2DQcXeZ)*e%2UTgeYUa>ITv=yY7a-1r|LkU7*vCY>_)&-2URzgVyg z{%sASHAR10FMMrQr`dmfO#nI ze}4eCEhr<{V&W?KnAd%m&-|6o{C$a^`Llyw=(*6h(T*J?etd6wc|wzC#FPE-BlP~R ztmj}R3_aI|OohL6%L%1p9%b~=6!4dKl27u>6Q9Mu0l!?{f;9Rm{qimAmzRV1(d(L* z1HV?f=h8iw?zxzB%ev=@kJ{gWdmgjET)OAdJ>RPCdBg8PA9m=SgO4uXob~)x@ym~n zUHC84o%HfZ+d%Hkev<759PAAr-!9Lzm2ULr>=I)ihPWHVJ2(y^*g-qkJw!*GUz-NR zcbs{CHSG4WYeVmmyhpsog#*^yLj+E(@Gg(JRj>4@W6G5tbxh&Xqdws0p>DvVj?ySS zYUxqIE$vagpz|nmsEe$}r9UnGX|ZzYPamF#u5Q4eme;M0=9m5yJRJTM9k5p(mhovy zAakd4R5WFDI+$O2(}BvRH+^^>*tr33T0X-(nqPWTa0`3WM?q{l;7lK2WLlZnRu$V_ z)PuPODB@6l+-H0^FdFJ?HHPE-Q8lLH9=rY+S6cO49FNnY@h-RMw#RNV6BUXg>PRN# z^G)@!8)6=8A*vfqrdneSi4S{J0`?JQsCG=!o+{pT!;Q#W>-+XmkN>4ZXa_!y;h0B8 zhZJYP+1xiY2rgxMSp03aVPExw{hF+Hhq__-IrZxlUh{ zL@*0+w-vx>=!8@?p}weU&b3;~!J09wb9wegI&}I(02-z(j4kG1=7q+UM(M0&`WR(S_n9(?fS?8$c?)w0X69iY#ARpKI`b5bk?0 z1pBV%5lPsAzlWk2Ib=9o-v#U?Tclcq+DsK1^|}+-Sm>~(gdpnFU1EbgkG4@CXt7Ru zkQQ})m*`dq<1U>hO!*t2z6Yba=eFv+V4dc9K?kRKK_|_lSqzHcF*SJ(D@DefOgGo> zG(a82c3!NIRRI9{(Cf#p&AGC>$ll#)2hmOvIxt3)WJ6Yxq5!09+7R(Z_nEqumaB3=L2FxUJ7x^(iLvvME2z*c&GNNY~MSCb}RG4;xDX>oJC7uVuq~*Cb zXhWM2NFopPg9IcP(U}ni`xr&jx>8ngWZnGb=Z3UZuvETWw~B+tBrh`Nc7eV&JwZDN2A&AkTrJ5C=UExWs(#LCHa#Qy`td3KogI&mBR(pp*p8;E$wK(Kqs3=C<%Q^$}$RU1|u+9-awFf(06G z)%H--3NSUy^!kc3ncXG1ciqLD4Zizsp3)6)49c@~}yk~74*;AzJ` z)9VLOhkUVv-Z1=HTUsA8IfnFo`b5f(d-2WDb3&*>{%PDig(T#TR>+CEN91@B^&6l{ zzEvqO#CSi59G05?VxnDh9c04@GflJu&qH>cYHZu>^aw_0@(Pjdwq4`^8nIKLK{b{s zxa-?q;t)9EdMv32Tt5zIW`s!X*xjSn+d&a#QOZ4uOX|ZIcnIP+n`|19(tZ%_(pGHf z-%f{+!kswiAR16mu0j~W9*R+fbNsi$gAzPlhyoch%28egv!0Vdb9=CZ7}&@oum-p% zfqlUZ>bvf~+lgGKzH8zduSN^Uu+spNAjyJEf8#FXgFq>+B%a6KAKL&6?d2d|i4aCU znadI4T%{2Q@Z6qDqFe?A?J%L2xApk%9_mq+BvD+V$Pq;7o&@2c8rnz&jg%*X3~c5-jkqt*FyxK z;oMLUgYywf4!~M79k>G2o8sC=)pML$76MNgA<%=-E}!8n+99;sp|){7V?S#!{})SraC4 zkw4a{h|GB$Bj^;Pt(?~ZaV)fR`!UTd9Ggds6^vkHcd5czQ 
z>!}1z{1V}2-^C2*WZ8tz{jO_w2ikKnh!UR~+SKn7vy#$Uh;PRLdN=@KfU!%_Q^0xe zgPFxe`jfFsAc5!Q4Km5I%;hw3Kn!`sOu?p84-iCj#e-X+bm=NfI#z~W5bRP3Lf;SA z-b{*_xzGsfe9-xLr-Ms(hX5JVR1Q~zgaYscbE#*BcE|u<%hr*&WuK3N9x(uTEshd9 zP9k)Tx;)C%z|5kyeSjaFRuKvcK9lulK$h9k(}0^~)67FL&cb!};1dDq6>8sjaTbE|K}c;^Mkw^~R!aX-g zQc?r;26xq_ScnAZ!Vj5y*-Fu%>1#(pk8-)Oz-!Wa8}3HBVl<9w3LD~p_;31jTS02M zRx$Dvp!9K!dIJI*qqs%)mEnkq{_6y;ssKh#;;Yi)H|(qi+d1A_-H3TkX_xl7V4wXU zCc=vzgmz>kPNExW;Ts;mv|>4W+31l_3R8z9Q5MV#ERo z5FrB0P*h;8PQrE9Mip9Ol^0+k=v=<+Lvw3m>Vs#4l)@N&oh{mv!hgoc$_Nd~x3&AONSO$8hhXS%mwelE^A%a54HskHXJ_~u! zK^#GV{5>}cd@8*?7`a4dm`ZPt3FrL!`pVAvOKa;5P!$FGzqIYux#iip)rIAAJ2R{2 z)*7HzX)6V44y{T!goG<2bzi1a;~1 zMXOJ(PL@MgC9>XTq#M8#O^K_2)Isq|F>wkk_`rskM{jhmhS*-VKtjzpLJqY)Bz$)q zw)L3FA~z@`tZzYkH{WYpOIj;*>xZjG5fTsej`k|ZLR>O7fwPhchRB7H$jsvq0d8gp zLZQs=A{18g{rnL-34o8{{UF-qb98X1Ay_Wv5`uJ91-O1INPGvx5p1_zn`BS`TU=&M zqlGZw!2zwh;CCW~@ql_9*IJOo545>C-db*kAI@c>+aB$oT}iClT^Pmr_F%frg6qqd z*5}rQ_#l38ZHS^M>Z(frilR9DS5tNIHc6Eg(J&NU)Nd0dUDEa2fOxYydDJJtF^p~# zA9P#8o%=HRNCG&AV$ctg2;6?}y+GJx$5NovbNvmBB5>b*x6=joY)zfmbB=f$6*7@` zlNPdL4|GtRV$WJFN6q)%oA2CJv-1mSb`=QVzWc@(YA2pYkHN7rwE$elfln;|nd-d) ziOU$DwUo8{s^Eoy9X%p&1KELtz;gkzyFsNQ3v_`njhxxuPrBvG_J)xI_FTw}d_fr8 zaS1g2zWY-9pYO`fF0V1CL8EOF!MHw?DMv0$P$%f5a1v<=c+VC%Wr3c&s@!`oaN8Nf zF0ju77#qJHSwwQChH zijLSCLzMV4m}pP~h=r7MQILdLQak@$pG{uQ^aJq+XL z=lLsihMW`VgFQ1qNX~pu`+X=OEl&yI0n&cX%G(7VTQj5Lv0TfB(2)0Uf0wo{mTmbm z1EH6L*J{Jqx@>JpTU-0um%pC-UVIVZ^a6Bga!$VfUpRiQN(?fV4A_K3n)8;E{Lg=? 
zB5bY;Ct2jmpK>6t`L!ivLUYAYQ)^1mq#Waw9KKZG3ivM3xV`~FmIpjG@)M8ET^RKn zzG!HcGIbl1Q!B5vVhHa~kJi}+zbKD;pFTMV@|{#Bob#DXPB&7jJ-(9pJ6;V(f%p^Q zkYQPpQ-IXi8t+fxR?0b&)Y$F348v-Ylu#TozVO)`^F<++q15a#)IX*H=G;-gvX?+U z=O{>pAvQXG_xj-C!zG6vPm#w)`1@-Wx z1SoHs-5Xqz;srp;QZLOoF%J0bVt6NieC{iF{5e%K{Y*kNex@<0r@AmNN7M_x^+GT5 zand|DLeYGfqJ?*hQtyg$fm3{g79`42%s|4zSAp3gIy_N)__aquPJ>)+Hmk(!H(shF zDKJXTD;sO=dEr#K_E$jaO_y1HUYOOhgVP@m_bn5<5(LX0YR^BZ84nF?s?E(OpTwV^ zZxlB7vmE4c-I3)OJHD_Cr2YBfNgvr4L1}-UynB6dy!ZB?gdEP%uP{F$>U(kgb!}l$ zZZpAfIPr4ssrcLFw=MMfbH8@yEO6=pgTNcodqn}gG_0Tjn9}6NO9J#vp#g0Vhr=RS z)qnQ3U35feB+9M0Gy?@(0!wj>MsTX~kDSF4+LanEtOu==DcB0D0<(}#ywKGdZG3E= z`D+jT$QBVoKkc0!A0B@c*Pxrsacs_j^e1-P1l+JJjQutsK1(cuqXh8!qy+vOQBw-WI4XA%L8Y)9EkSCve zvpzp!z==<76{n$6MACh61mUFA!V70>XXuW0cV6whcs2ajmf76zjjF%o)4cx;BinoQ z(Y*ghyE~&%dH)Yzym;FGkMY3%zb1<@sLZD%#Ngw);ArvZc&xVn;H%!j$5=5drhpt) zkK3z|i6SRw&;u~fa`&Mc@NtTn;U%?J@(!g*7Wrs*8~!AVvurvgA5q`Q8&i})z_+iA zyR->d7KXRz*USrr5vLamk1n_O6BeyQ`IPs;N5Rkp`(DN{VswGg57`7K5$2a1A=?4x zYEV^!NLncv2t(`ALS4#%z{oYm{)XE-oFd7;ibgc=4QLbZNLtVwBaiVrRwgyU4XDkp z5(*=R3`l*{7*%Cz3v@y`zeA`W&j9@K*WG8;fKy{av{Gd4HLQiVO9L1uBub~K{}1N; zgY`?TiGG(^XLWxU)}Jx4e7i``Q9h|k8K!rQFmY&@U?|nE9gc}@@wfD-Mt7iR;@6x0 z|LUXuMSu1ERNn7eizk(=wZK<(uhw;1Eapn*aNFIVHls2LvaHa zq9Pg4B*4N7dy$1HF|O&k!k7tjm&F8JZ3XkveEi6pycl5#^%V;j0})PZDGT;WL`y(# zl&+^Abg`h3pa~5F65qvW<~UrqY%VxBkQ`JNh9n_U09^gfaESesCNfQME-5BcL=#0< z_TpHP7AP&m@7WZZoiH5d)RzNa=sPMOa^;BH78O>r5i9b;8ORcEo7x)0NA;dUB%_2X}0t zL`yB7g6-5pFQ#JSfBB`=xFCeFl|n|>30ZNmlugW*TsudUUNPw*n&9Wm=B_2wf8dUt zGA+w=(V>t?k`yTs0_#QiCYN^dilSNrr=wl8a^_O@JN_E2hm>ArG3sBB`lv5lzxtL# z(3IM4v>lQg#q&UQXA*Kmq5yx?pCI;IAN7km=YG>su%38PuouR2?>Jkao#6($LzCSd z9Qk@Jb~zeOahfGU+vde_LgR$M)g>lsWrIij#Pb_a44?BV<@0dgi@ga6NlGwt_f8IF zG3I{nG1M28rRy~)I^C;RVmFTZ$QZF;P+H12V5~#^2~NFYBlkp{(w;VKQdy!FeSDFx zx8P73a*=3e;9>)rFg{17+l-J#eI|F^gH6U~jRH6L6~t zfX%TnoH(th392l)Y}F&l%!^|jxVarYDoH)AMne#L zg%@MgHw{orW&7r4v$&8K77PwG_`)yn6Lx1py4wy22E1K&i;C7z*mE%VOaBy=J4g9hyUWxnf$GVc^m$GzNhKzm-K 
zaU+(BU6j4*+DMJKx!NgBhncOC6??%yJb_bNu;{}GXSu+vKwov`@LrTHmSs5nv;Jw8 zNw62Zi$b}x)h`p2R?^$VuM}u;i#wCUe8*_RMy6F)qyup-maQep#bULD*w%9^`F#LT zmZr*uMoucEScj*{r{(Rz`SNv&(TUx&pGGhGVcRRt8FJK3wEt!>#= zTV58M!k_j?$X7x6KvL%|^YIETJN`y8a)x(n6rgoo*tR%7;Nvn}q zV{O+GSc`-Y)!S~D3bsi~*Q;VoO+M5euV63{>km@SSS&3ija6-6xz+OF&|bX$W>SGn zl_`aYu@x%<(JY6Oe~OJO#qLKWx?+V9R6p}den}Q<<(Q0H_Nw&{HI|ry5Oyx(4cM;Tvz_+Sw{})E1{zm2Xl!Umyu~9{C-mvrOQx0*> zN~qx5)ZLcq4{~)Uox3C$!^;^4Mx)G0$SIv@GWaDe}@=NQgB^t#g@Aw zmRy^TSN}Qq3c)@l(M<0oGtUc&AUvGM5QL`%Bj44?U7g(5{2OB6+ElQmBIU`*bDTU3 zc)Gp(uR73*1-k58oksA1fHe&o)Sqf+G+@+PvXgGsE8ixeK#FtaCGf{_vYg4VE_H;;rV|q_Flg|Jid5;c5u4UT=4bb+1|HD2N#FOhv$cT zN3!Dn=>doZENqCqm!kZ@wEjZk>XqR12Yg#H0Yg=UL7)e>m~z-ynJob(3>5R{3g229 zm#K@*^)A9oKqljcOTlu;KiNAw`{~{3>y`;97QO(X+n1j+)!(0K-i!p()w#<c--xNUu;f(?NC3ME0wL3QaRRI> z;qCd!!|vI}p6xRVRV*8^v<1sxKlrj;74(|5*liV7M20#PQ7!-U-J6R%LEG8E>5m7e z7w=CiTAMukMB8dBMK9`<1EnH}G(sp9PRW62wrLFedq+p#?(P3@zfmj=b{Wvx5f#&v z6=u&WDEjWr#mV0J_h%Qc_s;jq1N!s{t6YW|lT*s!HD5z}x^xcn#XNT{O50AfFtqbE0P~|b}%;Ir${iC7`R%R0hD$CN@<8iUMCo%@v~!#Lc9Q1 zL5n0!Giz#@b!X6)QE}8=Z0oys^{0x4sZr)%q=Zu$I2)u-EpL1GxHEp;XSWP}ZTkb$z)OYt-d8BWE3*HA>$ zIf-Vy;(V9U(4g2P(ObywTE4qrDe+T?THt_Wi!l!vE+F9f!l?EWi78(IAS?vPJ~r!oGM%tM>d5QBPn#Xef{zl~nJ zcv;E+w)^ra{?lVT75Ruhx-3jB1m%{=NeW6K_@E`7!G`5)#_)k8X{R*A!0}y?C>SG| zgp|MS<-u{ss3`<^Pv!UI#7@N9&+R!Yylkiw5afrzj$L-3t%xNdGhWo(_;)Hd;QK%^fwMj!h9pV1oX z_c`*2&xwo->6l=&cXEhkykO6iQlSrhU&%CBe-rfui^3qb$~8_VP-KQv{taA9_#Yu* zY5(VvwMb#6>oHO~X7ZUHqKaLNIzZqQ`%EY?UfqtK}FTapf6yvu3DyObGW*Qb%tw81X-i5#4(@g)> z!D|uhEO8RF(EqKStrsQwzdhQ1;{PAzX{G<&gp4LlzaA;tuf%?T6>hyEi(H=8Dl8PX z(HPx1fDaRmBPA#k_S*tsO0M#}**;yjE@dyzrAUY-v%{dxxLsGK$&6G@yv&23skX3@ zXKxa^ctb+W&eA0F(=0J0i=s&P(P+=-=S78%ZZN%#TiwrfYoLb6rJRKE)%+TVI4via zYih-;XEf)9A-#D|-p2u_q@7zUR?C~q;KgB-;^f*3&#+IUpnI#<+J^RLdzkk`W4V2l z=BsudJ9pn)9w7fkMytg{NEjx?|-A0+qcq{294hSS6E5`CB?6m-DjM5~-1*PkW2D5Nx#^u*p5}L-ZTDz^w>5S}oD)B)d zjNd-S_8KP%o|2F6Pmg3Z@tarE=fE$FLNxFE_({n^pIl?~e6)C8QP-P>pYjJRu3spL 
zP=B*;D_LLx4O>q>BlXskA|I&LMpT_;A97dTpUdVzIK+!h@z0N52>#3})Ul~NjB-Yzb+&z}eR#zK&m9$!ooy0~E3O)PQ;6w;lVQ?bR7Fb_w9nJmhE9!Wx;jP!6*Qr-y+hm_wF?Ml1(EOG4zZ zt10?_#EHo->=E`;ho2!dOVRF7e_D7S;dTD5aKV*Wp6u@UG)i%#t-hS^QGd_(ah#5k z7sny-fqNK0)CS#>ZcDb4(kmPlnlPo4V6U@G?06KhD=P zoPPcO{EhRfZw%97ATK)x6n_4kN8yJz@FLIM+vjWn8xM4t7@sZ%UQEg{Fhm1l_Jkvj z@Sfmb%}&*O-gaJ3zm?ZD+4;!W<2q1&-*_opnq(seC-(U>xH==s2Lkoy&KoWb7_ z8}#!s{jvMiX7PoQkf7LDe{(Q71#S(&uXG;nqo|Od=;znOqp$X?d6DoC#6mcU4&cEd!-q0)xI6C3p zj~*>(mJN!pLCPSPg>(!QE{%o_CY)B<#K_jKLe!np%gTuE^w6IO90$L|96IE`agR4 z;^poW{eO(7iT-OUVabAQ@%P#2;@PxOCiY$&vw^1gul0)i(+HSN$2TZvH&d72YURpb z#Lg(j{@Not1Mkc9teyif1xj6qCQo*h~}fjjOd7P-{xUZLCxNgoPoj zFo;*yLBupTvMsM1&{q~wYiZRenEW~RuUNK_8a|jW1*3B9chBW8-ETdH!F{A((g^l9 zpnnk=m}Lu7)TFzxUn079*?bA<1cKEwZD~MSt)XnN7*>=(N-Oc2Y_WvRRGyu0^`^uF^J zS46&+)<8CIE0z2M@Sji5@B1|J|8wjGA!f``k&0Hk1+>Y3M#HU={Aakkz4OHXKgP3^ z{eP3Mzj-3mMjU9nA{1f*)JcV;rTmKS0cuVE1x!&C<(HQ9hbLonOe0+3te56MrOZlL zhU}}>pN@>b;L}Y1WAA>Jf8uGQ|64m{`Oog?jOV%_ z(WyvErOoJ~a^2hGv$7rzQtDGP7*zOy+remPF!Rnye-l zFTh+s1~Xf@t*Jup(<+-#ptH@WFw&zMXlUA9T~el&3O6TEwL;V{4Wk(S_R9iV>D!zd z)FmOSWpNZUiMP&9B?&7kCvNSwI<4)LQ@Yj;wY*SKIkQuBX*N-0YqqD-im&8+YRqj> zYQ>fZ3KerU6(Roq{QTtNEZ41g5}X=^mdh;;EU)r-1>>69N>_W=|quB&FDD zIiVW5ET;sJ18GkT&~{2UU960h0df|b69?R*uTn|@QKvnzKvR!Kk^!|WS|=XWMPq#u zy!|&l%gFyCoZisn%8B7D?=Ayuv;TduJu1h4+IsmE|KU-dPWu1#Mk(M%-FLZO1ncL9 zWI@tH;nNL9V2l#YfYGE_6@`C6J40aP}CIF{7p0TdOHNQfEb*Xfv-=+hOimEGWqNZJbB9IMnkY z(WO3=7;f=mB^KGn9#0GD`kZ_*6emMHmPskHS{+xA2B|#j9XeXtoMW_8<(W$hz&hpi zQ*WE>x?VI-QE+{EaLRJE z=>iZ0LseFwK4sX!*2dJUNf06So2{@KpsfOY)0H4S4zI#&<&CdS>0~!UIhIkEmy^e@ z-~p8U$I1ibsQkTM{M9HL5AkA|2D`DE#+wgn)J?rw*H$sWq6+^h>OU*1PAkybBMW#@ z(7tiYkdbl$=%oC%c#1Yw{ZyY%P*qITEierw%VWgpv=rVjvzb@tPv*_4oya+5sR}n~ zF;ys(QgQvjaw*StkDlcwO=%kE7iPX)^PFAgUM(z2^Fywm9a^pbME^Pxdx64~?!T(r zIs#fMu4qf8U0Y!*TnJ4({|Xn)KP-@#=TPO&E(vM5iF!&?e1J);yhSL=784cD$t2-+ znL)*r$AIj(0~|&VUAwH!`fUQ^*cVvdl=CzN_n(4Vtl4&1=dIY@{FmFIWwQHZMDQni z8t;Enak8wwzx=e^|AxDj{7=K}?I-`wM|rx+|4O2~62Je@8a6|^S2Q|Vf%#uX7uM*t 
zDT;J-H!Uf#KcUYhEtGioyVJ|Y{U6gHAS}rsn)oCOW_YDQfNk>s?TY=+=*8%1|3Avp zS^g8#;I&>0-@;lb%k#w+9bAu0*YA^Rz5lk*eu4tEc%WSuJlQY4 zI5~W6pec*?*n5WMftzIf2!H-IW9cdXa!jLBN&zH>k4Th#ER=&Xd(ka?&*C`53mm0h z2uZe?y-6_?kaFuoeGR>#$%F*R6Z!JeIfkTPXvQ0IHQpCQC%+Ty!N)P$eeq)ZMGv7V z6REu)wbMH1EP>v?2-nek{$Kl#5dUjyINGYl|9Z;*{U}c-|BpwD{|F)C>#W31Y~=Az zahJ6C1e#+vRkTYPW?2OBwVg~!=a}V|&n%@0c(;H0q|>W5w9d$HxaqsTN%C?91$Sxn+u9bOfTmgAVvRQy1$Czu zJ{59xHO^Y`^4ofiw$9^jWge@4{uz(1r{@8l#`8a+;i@s8&8O}B-`c9g|J>evlK(!+ zv()(ycTYdT{Gcox`0ycP_&^@^r!>SHHmk7eISW;5Y{Game~JUXLc<@x)LP=7Cbc4% z^~&7nF^OywT6sqT?0bokaxLfEti|#y<*Hnv8!##2lXIGdm~DupKGGJdU2NnaFDCL9 zDbD*3&+*3;N01v7qG{y(fhtfIMG`DZUgKn{{Y}&tLI>72F!iB7!zurE z!x;zuM@U%OuZ+B~oGr7C>VmNii)@8yZzGln`cUcYi5dC5p4Rh!!dG83yAK=Cs{h>` zmhJyvKIwlS=UGnvFVFr5IoQ7y3+uo3eCL}>M*UV)e~bju%;ezGpr)rHfCEn?=cWS> zrcu=mtwrm|iZ_+5kFv5moBV=(ewTMp!Mjh!qihDJ8oP4k`L>uPJ{>HTr;+}bl3K0W zN4x#ss7(KNcAxTpKFZUX{#z2K)%f!A&nvp};^_L3DZ6lFeHVMbX4puyT=uOoJ!`Bf z|NEr< z)KAO#zy0E6+5Y3@aQEr_f0U>G{D1gJJ-@F7f0yt(T?6h|j7n!X^%fJ4F)yHrOQ=qk za;J>V97nql<(REhyWR$KQDu`R(2m}EaZI9_d?_%lV#h&TE}S@vLYX+hQ}S_)1_h*D zjfSq_+u7IIZO*dklzc?}K_7kotVw}(i@HfjipNO3P)@w9vtGsbNZ_1L}Yok&a8$-$3hNr)e#?A^C#VU`9)YFKP)` zqz`*CMYRJsqF(T=7kZJ8lfDV0rJq#$=hiKFqcH_d@tw6@TeYU(6}Eb3pqM%Z)68ne z=2u&?z}6P)OqHC9B(oKJ^}aIN^YLSflgJAXPvlUH7fsNc^%@9Da~P{70$1HTRFyqA zm2d|Yx4_zUdrSlHf$vN9yk_%7ve<^&fk;O9SFqGoD1@=9tUypu9(;@`gTjWjl`YCg zx%5VxdEG-KRsO@73U}fEJRyx=nGI}}|BiOb{@**>TTlGoV?3SZzeUZA(ge2fiAv{V zvWpKWyC}i*0HTb?R!;ry_v0@6e?cRX(&S6+|E--D<@m3|ou~MJkMk_M|KG~BU=%Ee zTbIj)L^Ic?38u{T>0(eO4YHw?Z<5X6>HTKgp55vcsKR}0|Jz^<3&E*BRZ|q&%h&W% zGS$aX5K|)aMsc`PoLqZh6CPBVZqPG7@nU=~?r^F~;bl`_*4m|jMspe|j>^c4)yVgn zEV7QFR0Be6gj8bw9NRhX9AI9e8*0*!?F)&iEUsUSJ5F`pf+hZa2~EwgBp8+riDr7G z8Lzs+W46+~{t)_wPGHhxj>1wjmHror2k@%2-7NuMggK~o04#&13BVubg#NNm^ZB28 zDV}B_yPp!c<^10ny{y{*?LOsydX(pn<^N|axQZWOF}eJiEP3`{taok9}6 z1EM>&G}ZvilRb*f9;G@*&~*3NFl@Z)67<)3h1@$kf5XO(X<(%{*SYJr8jZZ#l<9{! 
z38eRJrPGQFGg`ro8KH-82S%vk5REKO(WvAY-R2Oj+g{qiSZMglnVZ@jjYU-HywfLJ zkU#!o;s4z^4i{t=(F8wCA8qpg7u#j~&z;>T`|rnj*3gNUrZ|ZhN+}Yvj&9~SLX(Vy z5IZOK{3~yUncG`K=X1i4N`8UZ9ETy|1V~T^63sS2tdNpx41yQqy%z<&H5B0)92fjh z!c+1Q2ZA>JeZxiXqVN{c2x@YWP>d54k_fv!_x0JuSxOV!TSNPFv7iz9asLbjBw;;w zM$!TNUx43pC%+{F_`iBFpAGo`)K7LD4f2L2o`03aXi7rNdSAKhChmRZPQ0t$S8lq9 zdtd#(y*2csmk^pUboly!_1tUb(*XBe8S$y-k^p-HQ9hx+^xpk+d~ka9r=W3`{eOC} z_xkODy9getkH-B!8traZ?7wz~TTk-;M|sxJcVgPn_c&ajeZCZWy|2(=#8NK|h0E~` zo*>PubCMuJVWAJQ?$7IxIbfickV})+1}Ddz3=dKYIHqYwISqw<3|#LnUnqxFznVRSumx7=;ye1opH z+@U+%C^*x5DF-FNh%n?uJrePecqy5L7)=uFU2!O6dlAL};KGYCFGQXXmmUCr4V_c8 z!<$Fzdr^S4-O-ji+Ti#jOv5FE7BaEXzWEw$@nXZgg%`YqUhh9NLv!z13_A#r<4kGd z!_QlqB?u-g1BB(X;|Zin)exN;h=;bsAcDNJUC~)Hgxl^&0*HhfB1>Rn^m=#b#7n#d zPH}?To;&m!Ge02^DY&(?C@*3@E&JT{?wsz=GG$jOZ`ADFp-Ux@yKEo-F(fnL(-`p! zYHRt+QEQ`h_(GR$k)$v4eoknB@__JYK_arq7ATMST!t$m;VO{T!&L|{eOU*W_YvUA zA}eI~gexbcXKCW4cy?RU6ko@xC}Of~Q|svR6Y8h8G4791|C9oie-GzdAAN3u7vKuX zHI6W2CkdV4#?t6W7DZ4NF`TBL(rbc?pQYd7v_D3lP@l8*9LRwelffK&VG5B%_^qHn zM%%;T5Pe2{5|Na6;cFavw`bU=QNa3Rv^(5HeQEQXzuo3sv740;_RR{R_65vvCY4&N9;*V)PV8sPdtW^hBUk7pRkI~M{txY7} zK)e9`YHI%GOGWIUgiz>0$)@T+yF;LrrBiews~{nt;ztl;WEP>A2FwKPK063fn7W+% zVkD(W-TvYI9P*+lFWS;Nx-4-{U12ofO1)WIEd^KV&CvSg+l-}Xg86|7NBHH21+G`_ z(A{1hV+LGo-Z~lu7n^%%odPlR!U{A+xQ702ui_uA990GAA#)B@M1Mw;9VS1FxN6U`8jv-AZ?)@*<)O|nM% z-vusy3N^NIXI=mlOrXTAg-bhFEC*ZxE>3{E!v!YrH^AX55D7yVY`y~x*Q??E=TR9Q zHLQC@VmW6HfYAv4woho@_Oieg=TY16*D@)Gk?>D2Q3UYSl)vPs-+kAYGU#u>mt}pa zT6VbZb0k?(q}$uNQ}&Cf5Vg0y^@pJo;$qjx^Xz9Vr3-ZhY;FBv;PTt{^qnv{ckcy3f(vX;O2I=3O=MlPPNquUjf0IejuQHdWL>pyi6X0zf8QW#^m*IX8U1Y-PXzt~q7tUPwHKTki3995+lP2GJp zn4@9qb#+UEoIK_d`w>7{uxQCFaoFl%8zn5?UGf1b+U~#xkKoPh!a0R$v zKQ#+ID)>_V#nwsX`>_uKR0n_b7IDaFx9^4hhuEY#BX&GB5{Ia90mMO ze2tS^7z6T6*{qA+Gd#^gp~O|6NHjwqe6f~u4)hGVtAH2m2n%rvZA#F1>2+4vJ z4w!q=CC+&(WBL}lbW8)Sh~P(XOasYXf!wQ7{$6mUG{i~MxsvyDu8Veo?w<4LO{XLx zX}ugI?`KaI&4rXLaNP%AKu-=MlJ0AUsQx@AslhiXb(G{~lsJOJ`A~4R!6f=QtFUxU zpgkyDBJYe9x;rF43bmr|MQDQ2f@TpvYDi?^L|0s6Ib26=%!`G8;cE+&&0a*Nm{o*a 
z_q`YTa~xzLP8x+F6?w?jD~5Dcv`*7GPB3S+3cS=Sl)o2$Ty}wI;H%CRKb2QO>zDDhf60%?OT%^Ias%LkPs=}b_+sQY3wV4# z{#Jxbw)3QjhHVB{hS!HRy z;)GtqxTZ8&c)ZQkiE-0#IN>|u{Pd+coHO+nj#Lh84qg%L2S77t5clWwCVK6qUXzdq z&#qNF8w*|NwF2_~4IQL(<7o&e7c{5)5oLH94-3vino??T+1*_1S^7o<> zIJ)ju_YNI^WC=&tB%$JtdF>@ch=ya2F%0)9W%hC3YgBX%zDI8oTH(~na9JUOEY4nz zN|`(fonc|ec*B8>n zL>Qljw@8Fvf{P{Ska=b8uDqc(C1-o31U;p^z83^o-;<5GchSDMuxcSYk`zS^!;E;} zcss`t1N+%dE6a-lajgOiRyo>EYfZw%Ta^mgBNAOT2~>ZTQjZe=FQm~-$p4l1hbn)sc{K9cwMwO= zBs~|`8I{qxq7(CkX0tg;<8WiOGe}CWQ!*uua>aVMbY*M#dmW3~r{+Yuzt__*8@^;^ zq@3s3gTvK^usifx7nPT?TFapag==4J_BuL|7eM-ym!es5HnJRn_uvMlltnr#k*Gzm zb*J{*bB5xCEWG5lQKxiIxXfq-t!x)q86LgfJ3(HS&T*6yzgEq4PpPLR7D63eJ)cV_ zD$W`i(mZ)O8jKESX++B@U4)n5&$)@xIbjIcC*e~w$LI>*ih_{o;LOF>!5?E!qvf;;=Y2sLTrjg1CJ9LbTsCB7`VEqZ4_J` ztxg^Uu9G$!R@slKxXE>Gxk0N^32L~Ow8byLrBdd$`V8cXL7P0uHjc|*Gw+t)1+T~r znI$9`T)kqB?67IYRN-py87RY5lqXr?8o8q_cWX74wFp;(&p;WjHlKle(yszsCk=iA zWw_cL1@0+fQgF2zUgU7;qRp+}Q;{m+5~uh3i2R!2HzdSx$uBBy&TuNcKW*z6y+1zu zzwZwqveEJ0+k-RfUANPLUVy8b<+`FsRfdM(iYje?Z%BXOGt|F7z z-XSr`n%gOm*9urDZ`(G?d%;DM8PU^uhYkY;4ozlWL>ORglk@vGbK(mpZ(}0aoMvH= zi<`Ror?$d1rC~nIJ9I>6#P>oZUh@?qz)?tp{ODyie+3%W3RgfFUs11Ny9@jFFm$*>YW+634;ihaOU~JUiTx-R5_^--1t+!aEKM>$%@Pw>fk7oTNvR6H zm>OFEF;!8Z5+ch$#0=2vGKHGsM3 zCz8(5PkZP4-wScGq?R`%3{gOlC%-8*%chVmogEz{1w!}=tzH%f#;!CLZ(*pIyewP#+_usZs&@`C=DUb0hy{>E${9s z<3uVZ$cIoT%PL>$5DrYcg+T?zUkf3oWQG=AEbVZs^i#!vFzKiyZ5O(9yM193jG&XL z1r#&~_X4*IHtPT^5PJi*H=5e3y9Pk0prD~3wmuC*?3XC)dKQJ4F>8O)&`dNhu0ez$ zhSLpKM7;t#c38=|VIQp0LAruBPq!iI> zoFts95=RV6Nu(AyAPRI;oB-X^g036N^kC#S1qEknHZ>`b$wR$OD)8V@l|-3K4-`a8AUN=LjT4IAHaKy_KuF=rvQ;e zK!tTzofk$O#?*c2vb0mr#FjJ%z?U&c1waEn)|=*XrU{zRbPl_U|ItYam_wxX2lgvT z*BeQ$hUk7JX}po-a9K}=wu2#ur_;%xim!}3>(S8dQ0N5e3TrI~&bmF9+nqU(f2sXh zi!$r}eDv+9)o67JiEi67*T$_ZFQ|jd&bGB|&m5i(n^P5D5pnjTX{ptpgKMt+b*jt_ z*v4OMBjnh#A5)LG5fWk9CI zL@B+(k#Qk?m=HXcq=r?>RVdn~#JpP`pa!_a9nLO**%$Y_GNS5Qe^7iywHiz#bc+NC z_NI`3Z3=&Bk*`*rSxg9$l(D6%My9pc5F!JScsQl#QeJ~E`5-TA;XTL9LP8qh6>jNwbeVBH;G5p$dfnqOK4F^dn{ 
zgN?{la6*GK8ShZ&ebG8_&xHlG$&nYO&^<#jP8NhQAZ8vzH#ig~fc9llfa^WOdvUy% zEb4sK`Bg&%)m7z?IC){WSb)`$aVulrXkNQHTwqJeC;K!D>$J=TGg2Ko#&B(h6=;_K z@N+Hl8=n$f#sas2*9w=pLGBNi3~F45qt|#!qCE7mxNG5$*w6UvO>Xrb%w1Cqf2Y;N zpd#1+X6c*6Ti_d-Ts3=P{givNNnt0i>_Bb=9Fz&+J*i(2E-;&@!yS}$rmFB|#aPBI zm7jPi8}R=v2b=}2MjM$LxKeLsQ<>eN%j=Ola+eQ7SBEdVMM^DPhEa-JEPo$clp?w+Q1b#PT2JZ*3_ncv-$-02$$M_h!eWt3_!*Zjd9}f5BIj3 zuS-{M5m@Azj6sZB0+BG4b41X%hY<{v(}&wGwBOSkGzBY>C=GAXNscBdJs0`Fb@|It zv%BaraMg#@yF-u*F{Q=s>sv>cg9T0#;xEY+S>X~H#cM2CM7D9`$}jsBkn$6#2-iGK z<9iBHD!Iok>#G_IZ4y%dO5cK2)_D!r1H_swroXr2>nIO$WJv1Ny3ZL9W8uBa_otAN zb%E7Bk47r{(z&e?+W+@v7f2e0NNvYvS={j)7dd#TaZQ^5jelRv~=8MfuJ zDAa;k%B|oOd%3I!(|+}<0kh3Pq7_WY3ESkPUI(V#TcQTct~nNJ!E9scYQeN;S+Ikd z-)onJP=UD@-gvi70r_lOs6GnCoeXJ;qig5dO9ZVhLB0x@ZA9AwW>FGe^n9vdGcYr^ zlaMBCNRIJEJdnz zzgV`p6Yyi`i{-T0SGdg=i2%I)BpK~6!0i!KoR?Uu@z|5Es zDNkrR&-Iy*%1|#2JD9JAuNu8xEMUrW^~@IEO}iUIT(?->SJbUQ$z0?Rd@l?qo_~e- zb-o3g%dgEQSJAtI`%($B^-iMAPltPr1xlg9?9V2 z!hn!(XjG`+}@aE$u@SdSB$QlEw!Y&;nJV_j6=&>5inPy;%+;0x-4Ws?gI=NNoydJ=x# zw>+QZ2LwUdnE=W`eLFn7dC^)w?@?yB&5=Z>s1mNI*ZWaMN|o~T+t>_jX~yBC3BUq^ zok^BLtVtl^UMl4IDOq5*cQ{26jdG28wX&DYAR{opq|eAT7Dhmk=$l{MWq2RAr)SEk zLe5d*0Bsy28NCOP1Xe{SfQ-SlmkdHO(RsMdax&)USV~=(wNPOYWBt=n_ce$wD^>&c zf_G7PI~K7b4d8NBLMwcwq6%YUG{{&oU~@0QL0#c804l-pp$H&jl7s9ONN=DbqYFO8QkKED7XBEAUL0!^XIakaXzhi zH+(jwZHQRwyn|c|BiW_QI7uR)-Uy!9fhV0C|IdJ6x zn~YKov+0ziA>~C?f$c;!XTuw6(oc{3tU5_s3|YUw?+bL=K7SSl5J; z6k*8I+=(Zk)ds}{FX4bBkWJv${4O#U>rb+6-io<02W22!jv6SO3*H@;dWsZr@J{h` zT*)r((}l4z2V1iAEW+twFh=UPVkPo7xqbcE5GKM$2>x0{5YS%Lf8yQ9EE*ZZiO+9n zQ3}xT--BFIGgx>m#fb@IY1C@xe7kpcu)9;(IYw+IU%6zG# zvm`?Q=j`3FoQUuiwv1sLMN~09+uwQ&@9rQR`82?RnDTY(&Rp~zP7;!C^!6ZmcdQde z&pn1F7)M2gDd$!B=JP$qcPt`PbW1aB8}6aW?vBKi^g_YEW?sm&tEUjIs-|$1Cbv0w z0rsgd@nRs|B%H4oLQSRu0|DqMMFC+~a5RAOL^LGB6*HV7p+U(z2MPvXZJo%`LECwU zP$p!0 zJ@5vf;)MQ!{nX*~{_|jxc#%KHZ173^<&fa>0NNjX;{U*JiS}<~F2!OKd~x)2S(GuM zL+8*vA3T>jbdo4lIfQ%!oZO+dCXND)W@hl>i1M*SFc#<)E=Wdr-#r7*L8o^XIXKlXiv80tzz;1 
z)O#g@@X)L8l#sAg>=Tt5m~WDNS#9z)o6Y9(1eFT}2e$pwtKC1Sb^nw-N$ne`CpM4* z)!03#!|vg{fi#wM(R(KSEEq<7<%qsEdxDuuJr#-B^tY3jGqtDOUhm9+#J~>tuA7@v z=xqh52R2}WeUEP~(`i#3FmiTKoItDr{#$5gx_NTe+fw3>vk(~0sS<0{M zn@Czc2j3KgdU+k4z%7s$enBg~OT2haQry&_fh~TKF13W<<(RLi)|QM^B`&9oA8$E> zCOB8LekNF4nxJnJdc*R-5$_K-DxHd^#-f$HPr^ALn#>RhTp992`URwvzdsDni1RDJ zA1XIYSsgy-wNV%5)la z`Ti)^EtoL!b)_Xgw zMoooVXKPMEC5Kkw&XE3GZOli}Wl&e*|5E`q z4Vmhy@49Zk@F65PN*h1^fNzW6t_uXvoHb7b$6_+091ciYICzY(SPE!fhA)`17|Uc} zv0U{Hu5k4tb$6}iz~f_xYqYB=Qvy_%gRe} z$@K)5a<}`^RPufxFjktk{}2q57vdzX8D@!r>PZF)(RFT|K zKNo}xtDk0O3b5jsSaB3qOioJAmMOKj)p7(45zGn;J^u=RXCxTwDHlc75GLsyCq2m& zTo5f@TthT}E&6)P`Pz5MOpqDEE|jo2r?w17!9{`>Bnoh1yqDIs{_Plz#BnRnvh#*#UbeK^E1= zTB_=knA7Mndj3EE^FPlO>qoC}G)B)q(vK7ME{j9z1z^!sIB^O>RhR%!c=g1+{bJdP zv`m~iSB)*G$y8^)syNbz!uLYK0AeMr&6lJTMCLIh!bagvihY_MlARNi;6=fal z&fHBQ#TpL=hPF*L_RYqo@YpLz3{#43X{Lm8a8oc8Sq+LlLwzCUvzT5-9sshDo3Fg!uJE@n%oaGVdaFG>16;vK7e#tr-4|RS{6*Y2WucrRjqDF5&$k!hAbC+xhx59iss?d(3_$4 zj>&E|ib(+TWAU(n7K37!X6^&O1h|mXiTD?k|N1#!!{}?Zh<#uYF9?72?Bart_tOBU z{y=s);59D_?F@9C&v^q9`lTBYB|eLVaZrp7vdJ&4+JQe$*ut3%W8{wG*rj=5HY}n% z4uB)UaR3}g^w<;U*t^*KHN!o@$tzx5k9v&$4U_*(dGz@Q7|mhihgpCX3)yXjj4L{H zvxZ5;YX{-Ly6LGaMS{UjscF>WDl4e=TN>ao8t&}KtGB8CZl2PfJkpd}8&;S$YN(6_ zWx$iHP0@1GrR=xuGI0v_5wJ48W3AkmOJb5VlRgy8FZ9jUH9 z?ERJ{82x~6(T{LIsj#~SwGeszIkAAkYPb$uEghc`M|7o$g5&XW8=a@E*dU2Q65)!S zL6SwP=bi z!3&z=g0UZoX4d&@Hq{?YLORi^WswPwH+`WSy?{k?AzT67MPN*E!Q86K8t64&8e_pQ ztk?VYHkYE=ECmB5LD%m0pcU+(;Ew6nE6eED*Fcewkv;poNI z?)Kl%@F6;Hdoq@K$=`;*XH+cNSACy)+5wgneA z2A=;c^-?VS$h7cE^BTMW7~)_@IxhB*;fIvp-3+@OpDq?zMABPnA;Wom&h$89nf443 z-nP5re$@*w^Ai#akPOd0utmyE5RnMME|^vV(QOifnU-R^y*cGDS3%+s^oY-!Z#UpE zc9GFSJ`_oHJ;rch&s~IcMje0h;>Be0!r$K6b>nE(TX;nB2Y`FL@JKjDGvY-_HX-zX z&-e>z9C-%fK@1@zPDyKF zql4C>8Jo|%6yJEatz|i5aDhJxIf=b=evchkHj3cJ!Hh_V8hW|tddKBE^C9-Xo&Fo* zfCuQKk^YZf>}>D8DAWHJTQ8sJ|6@E?qapBKV<5~k!ctC_XMQ4BXOmW6amp15R+9C4 z$;9*FY6uLLK?=*jT3UsHo>#t#*5NYTPc-cKd^Ln zA_<~5kpa-AX-+HkB{w7hNqNSwftCUm|2Po=1)OFOL%q8J(gHqmeV0_{!!nVVWAm(T 
ztP6xeRn|nSNTXCrzdB|B6b#;MjIM|7*2ooNp4_2&jLd*GHgGEbqY9wPhcW8+WebMf zvu13{%Y^AY^f67my<{eLX1STeh@@I{Bzz$vk`gZ@zhUKHS7@qOVT?Zf+*A7jqxYvr zN|E5DMtF*%o^pb<&ei4nd76beWGpI_e9+{5QS1k4b^64yt@l!kd#m7L3C|j z7=>=nOsP6ADo;_y8&@@CK5`H)D^a`F@wSCV%LBwK$O@uAzRZMLtSA8l<25Ef)rwj5 zckTVu@8|DK;jS@g$J#?l{3pc8+l`D0O8UHT7tQb+Z6LnH2?jn7&6Fecokh;3Z8239$~foF~oa zTe3(L*4EHlzCRXTDhcKlzJ-mXvoxtTWg8%s)L;(Ilm@~Cl7zUrs@>Z~$N@GqvWh+9 z3%bOe9cl+tX)q=>t*j(`1xqZs*eIL^837W%`5KMf(JSGHBLj3vcN`Gfif;JqqzMf} zoSE( z#37NBH{fts2Wt{S63w7EXrLWBb-YMbm0*;i8w)jaPvO>zQiEqU(OKoe1>I@Xii_^- zMbnTup3i}L&L{YJ3sgBd7v}hdspe!^M8;BQMtw{CNE4STI1p5T8{wubE54P7g4Bfs zh)i{T!cZ;*)F24Gdwv=qkT1y5xdh0P_32(rZ}P zIR~-I2`~g26$cQLhJOZLDo#M=308pDIJuSjd~-*;+TSq~oDf7|=a&egG_^)PH_ko; ziq~dtJ}vxMD;|?_TwiHCOg#SzN77_=m25dQ*9%t=FL492kASd*%?`UNC;B6Dm;>iM&jpBA%QFiZlT zh{-o_{5cM?P~MvO{a@}Oel|+RzUYNo-0`4? z^J(PcIAtYM439A~i=3DS4sUp%jtm2!!Xml?tJf=hoAXO54&@m24}Q(O@DA>7EH3~u zzcK2=zxiuConoIKn8)-?j1b?QfW?aO;+P(Y_}l9vcNg~GM&CqSn8)QS-lk}P#Mt=U zbVCupojbFP1Q=wj4#T0aQPWuAug~BuAFk$-Ab38I-D-rTSxX`3n zEk&Gc3ZsBNOKEb|ce{a~dhCj6(;&`$32B60lh{$dYmV+IAJJOgJ#20;$125WXyIRs zB#{HKW<@8u0VG2}?CW3#1t}E>Lu5z>E5D=o|0)3l``2nmhJy(!oPdh$qfADtkRq6q zAHf6VHu4t4H*zd1c=8qAy1;Y1Il@)}(42D}0NPs)nX)->&?)wUpAwSdcagu`NJa3A zjvUT14Qtdu1PY{mIsg|kvqFKIm)DD1Ha}FeE=7Ht=;X)!ie0uoprqfmK%wS}As~%B ziVJ}=#ND9t=!B(OLTPGpNfomeaL8DuT}FN@9HIR87;Sw=mTE1 z$pd}RX(#L`<4@Iw5;Y3v8PVT!CN`WAMMMN>0NP*JHHPe{hxFMnw$7LOsz?Rjaw1Gr zc_TtNF;m2AEXltMLp9F#IVupRBi|WtR#l~T zQ9@x1z%-W*=?4RD&5L6%SxA?ZhijH$-=~ZCpp;IYyV`KFEtLMwLB1xms${0Ntc9ZH`-l9To-sA+Qomxz?C?yM=H)*O>!3JxZ zg=ZLYyvY!T`$9vGaiF%*43;uP{E5&Atuu_#1c&ryL#;`(YJS0JAZ~daOw~mGrcB&= z2(cW}>K#CmI9Y^&06M2)X2?yxTlqE+gadM{Xwo?+Oc{kjM1L>DYFJF1E#S8X7!X>r zMxm%}JBB%V0kIMd-fBRUBJo`k-~P{$6gTftA&ayYG(H1#56}lL6#<~ zDURPxFT#?%Pl&mcDRGQBfx|41X^Nu&LCAh_OV>un9FXt#q+^$}L`w;gGy*@5yU+BPb1d&YV zU;qv$HQ0(6a5!k>Zn@j;j$(Bo8#6AHh}g(w*S;$y1CErwN#6vc;NsQ|B9*z?W!ZE} zl*C(zmW#~_DSSXvQ~0L@Q(OIBu^+cep#b)7Y@l{VR;rc_qL{%hi4^IjV^+Rsq2i?GHr45WqqLw4V 
zfMY#8;ggbJ2L3CiwLwvuDhI`hxsrHUfo|T&O5{FR(>4htE^9~N{o#qWia?;Q`_>D+ z$k!84etiaH;1uFaY1wO;Ov!CV1NmxuI2;aTNyu%vr{#~wRJy_l@$DV}_NOidfJtlxt|vkQ~83f(}$h(xD|~Or)DK42p^-MNKFZrIkU^3s*&K zX|gCPTWmA{X<7vp)p^=7s6%`5E~~D}1Xr6AH}BU(Sq61!R$G-6o7z|9N+uwt5lBVd zqf1n6K|}#JFp8ATLFDY{J+wN)GJ#D)$y6t*p zcczL7bk$3PBSknSEoBnDw$|L1=I$pt(j2dI0undG4Eq@?&zAd8nVwUFG{Jbl8Fl6H zVevXyTkF|i8~1u;i}nI$ilVcnt;4klpx6!wDj|?KkUX%$P@bTslo8tHU!{}js%@Vf zzP7h}a`?J)o6?>V8XRyAcfkJ~B$TFRll&GjWrbFYjWrof7fQC&UeRp2*hNlnNAAcK zu})gdrk_NvPa@a9qR6$h&5LH-PB&nJr*zlQQm85NwXvlB$qF&YZyN&|< ze$EB8!0GfFxox^`?R)vH)`mRAA5(};6DS*x5(C4M>p}W4z3^!$tblYWBT0oyXHg|| z7JjAJOG0CB<_U*m4(&PBqwn=v7_xjAJ{Y=b2FN;+xR_g@#BkNMV6}F)TCf#o<;jGt zlL=ep(0@D|wgyd1l@)2wGdJm28ygyzMU{V6qq*WJ%Q>O`sjnYPPv`^a4z`9CHNcBu zyYj%MJs2*lJ+RLIf4Tageot8S2h*xTz^*1}(kYnwhbH}j5`GlSD@0~#9TKm)X*n+V zb&lacBqu1VBNb-5>{%sdcjWDiL^Jhnu%;SzRH?Fh14Q55^#r9e>vvTY06zhu{eO+5 zWdA{Q2fDS2HX!%y=497Mpf3XH*>8=H<&nLFBepzRza8tCwKo`y6_j%~Fdh+&P9@cR ze|q$sq4mB>z2DzJ4j3lEtq~3!y1+D&HRU)QPy1_=5X+n(5@rb12oBQxRKKO!rm_@G zvMADi98gRiW(t$il$t+=L%CtGbx(KlqV3l`oXHiU|3fp8ZpRB5g=sV(r-n>6IzmUXfEeB_T;J-}17Uh!k3GwE^~G`(yMK$G#)8 zyv;&7@j})<+I+Agr;Hl!r%%cS9rbk?M^BKW!0sa#34dL5ZjR)#snR7{kl8#%A*EML zM9=2*LO~@_-T(9nDW_r4lxrQjk>TRu}9xO1+Pqg1rXm6bd;g^6dC&@AUZa_`9*` zFUwFCF&%{)=&p2DfPfxioBi;LiNW*jcRg}P+wRaUp7iF(3y1zwv z4o>bPSIgXN@S0bkHSPG~rYxAa5p_c9UqQfx;;_DbjrbSs?t`4#noS+tc%kFdMVtt0 zDbb05kRkJf%uOYt$9w$7i9$L<>p$%sZP+0wHLXrWox0W%6M{yT^C{orZ_acO2SbrA zUYM~UWhjR=Nshq_Cii2>WJ;6b4S+N3j#C(b^flwn{4~dr@LiEmRUPQGP+7XH>wKPjr%veq4bc#Y3K*7djQ^IPR~*NW z8SF})EDs*$D!*EPxd2zSVUw!M=P7p5L&Q$1$LsGNK&b(=RiY@>H}`m4#iTAu05^x%q9-4g1fmS7jSo|p! 
zXCc5Sv!NGdQ%ssq%61eJ4TB)@81+Y6FWsR#6xmGk5d$JkDY%3BMwYaGlaY+dQT1dU z)5SWbx*MLNJUvBus$;hvE!0!B>%Vlor|!dDF5FY+cDuxUsuNcIUeTU94{nu^Pn{dL z$9;Nb#C_`A)Kl=M#|{3}d1C&1BS3XV(c^}Js)F@JVnJD%qt<{>h1 zk4qrqLh&}&C2Eqr*Gjykx;RVa%Zy;J3~yCaX8Pm-^yC5bSM&gS3cBy?{hQjvJtmT;oXE&w#BIDEm-=L+@7SSrj_l4#Y>?Nb~C_ zCq7#4W4u?rIvN;1 zlg;0zNl4^EP4o zH>C`n0@)oKj@tJeisQq4fp^*2c>a?aMXiVlLpv!vsd!t5#yd0P%7|Mf^_ZDMY*sz# z$_@Nba6nJZi6X2g7;Fi59JYse_4l{ieBE+qR&es>G%Mjcf$yU29oCNYf)@uugD7x9 zn6`9v$Kbh)?*MVV-*t%aPowJGGjMtaPEKE*vw3j5h0k4MZsFCOIj1@?WV*4iuFDbD z?#*W@ytq?|@=-aD*kPwybg?;C!CmTc%{CfT5)dRku7C1lO#b1M`7az0zeyz7wOUBf z+3zqWzUu@`Up!6-VzNZfBx4J1q?~m-X-ygRfZ4Wg1a!t*vNAlUEc=o*nHQD70l%Le z19bH-=#u>LU@AY9^SmwSGe1*RQ9#m;4i_QW5eFLF|MW)2bivy;)8K}<8!R?i6jy1- zO&KK{TR*#RHOM{*t)_55@T}SWFoTcoSAItf)VU&N*jasHvE{0-UKORl*Au6$K}5C( zP~ub-&Zxjy)iD9LT_qzj*&x=A=npIj7%l>IRm$utn4y1GtL_-rWvZ^I?heBYy@I%hLX0hW! z5HYr>fLn+u0ipT1#-4QkJZ?DSz>KATn#E4722Z`CS%t4t&M z=w4MOq}J#FrILM>2b9iDIv1!_xImev|BFc^XQ7@Fk07JBv4=ju`Xc@HRIc#EY~_!cn}QC@Ajd7gTEk&I%b{sVmbV`^4jCmf za^*M2D>5BwD$|#5H2h)o!`NmV%q{*U9p|gPY73U42L816O}E~Oui9C*Mn_xaMy&z2 zj+?ByKY22~vNihF#!I#a*y_BfHGp)UvFr7WtuZD4xjwNq2Im32VSD*ZYS|@QaWAX4 zvC&gY{Nx9mscK1^?-n9pga{s?>=^+L9QXRdiTsNs4kqB4FdW9vg?^^uW$Kd{+JP(} zK4+lBQTasq00t~6QV3}S|KA--o~a+{1cu%P)scf6{dP!<5|ZX1nZ+oHE^ZxPi7z4F zX3-)J7vh6N(28;lBj^yN>i(RKc=6=QIPCnJ81P=^&xQ@IxVrAtBANu7PCuMou#g$M za)Q%0Y>N4`Ztx*dBIfalI}IuC;6XmQip*;2H}4qKtn5p48!F$a)QRE=NFwm!1sG{x zrYIJJpS=?bs`ie~_6>~LIc_9Qh0;$sO<1kvLF~qU#1HA7-QR;v4t?kgT*2qhbjAJ) zT><4Q{rUMbm^&C;K^OuL-Gt{a-^NrDw!*W>oc9asP_rjiG8l2&8= zn86my8AqOV@kbb-TR?6RN1C~ADczQOS_~KH26{5AmXA}B#B;`gG`>;+#WW_45h-yw zT*TYS@!A2U{)>D{p_JG1kYPmzO)H9ULF!BTIzX-y%n>G&A4Wfnda2YnmQjIn!I)5- zMrc*oC<=k2o)U_k>M#Yz&RG)7E}lhB%#Xzt_}ow}s@-ZS^)cK@qZCu1 z!raN7x43aKJ&(__`zt{I*F<1{&E}Q_SiDq;&UG$?ugZ-R@5V_eV;!}bDH~>?qx`7|?#kkb!eBbU{*3PYgwwX8aNv_;K`R1hi3@hAi^{}9YJd?I)5RHeL9PoD@08AMH^=8g}@J|v4csRxFm^n z3?)|-9xsgDUBN7(qaSj|JMQ!1sR1;HEWW7ggc`u=F5ZD8bkTx>kelAX55@hZp$bg- 
z+YfnPF7UMzQ8dO1nZ8TRFX*VSQXe&yjjIh4RoY6VXYfI1|q`hydgX5pl6K{BGN@6X#!PU2$rAaghTZ1&+)7 zTqmk0sEizqZ`{ej!G2TGMDZ#L7156zs-S?X*x_D84w;wXyX!>$?#-CFi{ahP@op5O zPj@nXOs|$K8n}*k<6{(eLa7r7)ZD4T_58F;@zYD{Z7>9`>`fbzbl1! znVGv_A1E5T-!b~iZ0920i6T>_{aUAYAu-asa$X8Cy_SS1EC#gbrI#V_Ex0sgdAyu} z-{?vd&1NSNbBD+ane0G_GSW-RXD-V?Qmx~1!raT|Fg;I5JHr$J%Dy}<2Uuzvc0Um+ zJT-Y2>HuqK_ zUS*#x61IUgw(Y=}V6~(?yE(Tp8VxaskZ02f+M%E*t45g~pxdHg{|is-=3pLmMWkJ2 z_bfp`n~F0j#-l0mh&tjUkgi-`jD9en!m#m@`V|j_+CaUk5bNre zUuC*H%idK+!I2X?3yl~S$V(^zeV`KLV(-(Zk6$VE)ez|^yZ=}!71$tN@VCnvQLh9o zS74?#S%#dv2=Ii(%o;^Wxwxpqg7`ga(2Tc4$gAWl1=s79T6Zbl-KK5XO`EV4i1=o7 zqs9Y=dF+r+ICm?7(ADdHbf|wAkM=IBHfAg;ZwrT_lf{OI z9`yeF$?*f|{dt0@XeG*_y^6ROqo^o{i#A#&&$tV<@Y}eOBEn+^To@0*aVUf1sbO50 zhjv+i|jJcgo@I~na>$lIY zS)*8wRkl+rI<=xxEB+(ZiVbncDhp$syU4AjsbDwjH_{+T4!|U4)!PAdfC# zF#qvS#T)t?Vp5g+`B}6M>F=nd%NE}al4Qe59-0Mi!?J{P_BT7Uyw%JMP_dcmT{JU| z4HMrfo0Z8AR>;nkWt2M^1>h>-iWYZs;PD*Tf9D(-jRK8mO5)6qyXSsPr#=-c|Mb3- z!RxfK_1s=BJ%wju)DOH)^pgj^(hB;y?tbV-ew9B|lK${A2)!vS z_eOr1u!v8+Z@Nc5*W=NyJ9lED#c3gM5wTb;yKjDb_v-DCd?HJw8DN`p9B*p{${jvTlvm?At1NYVs-MZk-erb!S`2z@XFpFa;ND(8@e`x&MzfPPTI!wy;O znI45)90r*fK^=~tC~_zvm~Sl8yF)viApGTv;P2QWb3 zUl@f{mwC~@AL0nQLkbmt7=0YdtVW;v*=cH{`QOfAd3u(^k;m$9gQ-v8f+0y&sRR$x zn78zu`0=^V5OC*6hBMt1XiPTKQOjmln}^p*EZQ1#L7DUfoD#5b?f`MFsg65^w=&{b z8tFg<3r^2u9AX)QH@mMSE<=G^5yh3~MNKdjvkHYhQ%4iuxHZS}w%I%~ONp4sx3A8B zKRq$bW$o4*HLcD2vwJz+YN%kjjOon#(dTD{NzYK8)a8v9sz`$j_d)ITdG_1Oi#Nx= zzRC?3vl!N!tpN0$D0*QVw`{O>(z~KBiXCN){qyI(btTc=Bg+d$OY*fN5w3+N?d@QS z;#(*7z^3drU;)MQoRt6BGs`|(gxx$J-dOb`AOZmXaeV&f^v%zcEDk6SmMw2G{?~u~ z7h9st5E~#2xtL_3Ls|A1sy1s7vrYaSg11?wG#{tQ(4aR2WseGRmA$r3jygA5piyN) za>|Jo##dIJr+$gDD}5uTGDt zn3u0cn<<&o_2@Ju2t1r-Q-h^8&~NIk;YZhUG&CPk*V(VB>uC6X{TdGwHNOf`Q(Ny* zA+EC5^`?k&_0$WxkfQ5R=g5$@EDe3lBNDUxz&s8-h+-7MnA{ERbClpr#jtl(rn(>S zB#4DM($0&Nc>c4@JzGOrR-vvzS5})_ znX0S;ZLTS+i5kikWm7wC4SmWqJ%pNUn${<(CEH2jt5cH8XXUHWk?k|O9u?W%{=;d= z)qJ#pf?R9$KDvIaV)=;b@z|n!sgm?3nYP0Zrzl>CGwRTgabO4B_}C|to;mr&7jev7 
zb^d$7QYbMm8$2XY(58*@^WV{E?{Hr~|J@rMj-Pkuzw3DJ?{~ii#|c3*7{ZwDhIW95 zsC)qn2HPd`Y=a4d0 z80(-$!$Td?V-M9=`Bbj|ISe8gw_f|-RIdO1z5Tts|Ht#A!@X|(uj66s|HfZTn0x-z z4%YXOV-m6cM6MAW8e(3_wsAt$Ku!(|ppl}1`Ge10GHCfuibX)B# z;BV4x|4%^TBy_>iKKs*Oyk$9hL4S||lgxpmc*62qa+Eld6k{1O(h)InukJF$xzAiV z`mA8qWEA@$nS%a*=S2} z!{M#C@?R4T&H5QAh}_=;KXik{D=Dvyp4h(|YZ*E_Y=vQAzS8NQSkE#U@&5__NDzTF za5VI(8cEFtR$~~o@@pAJdyDpA)atBb7}dQ|jZsvKuVoOe4cf=Bu-$v$Ar!6~HAYXA zT+6_j+Om(C>L|0i;mRb4lM3Ex5(IbPkHiVMOmU)!3Cm&cACSu@R-RBPMR_ky!?$C! zfNG{2ZrSx=;PJp`fxqwxH~+J{fLlV;3M(+XbP+*!yIkI8b{HuV6$7sPm}&{KjI}^5 znH%5?(kAQHcX#MIeBU`2|9`Vb^}eWC8xwu~4y$2U|&N}AixBuA@W_KZ4;DhL}!!-!5Ozx6D8 zy@+*hr0&hHwq?+xys_w^bNL0k<-wblWOh>~rXs8P-~Y{^Ne1%Ih~88Ffh&TYep^yT zBAB<9g~9Gs!%l##o9u21KCl!z^LoinSMZVI3KE4G%zU7t0L7t8?8*L=Ma~&QP%Z`W z$`Um*Nj0=gH^H2{3_1&vqJW9ec3;TbZWm=M?FTBpgyZya&G&uH2}szisryYc6ZIvL zp<^$CT-n}GNOsPEf?9}gr#fU>OI!VsI^`Tci@H8V zFOIr;LC@$_zCALkwR1Tofw7*QwC>fEt)&`9yaC=0*nXa>5aBV=wiuhtZ+$_`J>;r ze>LU#?kK6aU^RqAr|g&fg|(V4J@Z!%s@T89F>;?OO}Lr@_CWCUm5QM8g!M6B=pY8f zw$a_S^OvJ>G{0SO0gWt0LGxxE;_|%re^4L4(Te)e$a>z6gd^I7Ul)U#nKuL@sG{1e zMaIgZvq#hM6;zQ0@L`VQ4eNx_$o$uU(IM~x^vuCPr76gehf){NHod# zc5m<{Uf2R0ppP?PLDgwr7q+ttJ4|bZp-R6FoFTE1#s}gDsM(Km9diG-i5vN^6gTzG z>^4mrDLYlPcD77Q@Asa8#dB0|!Z-UW3mj%MhmZ1XVO)W%YhS6Q%=;UCEeYE_G>bt+ zb)o-iCWP$DcBss_|Lw;l_x3%bsLxcDjq{bZF)*Cc!RkxhHwLKf%Vvwnxx^aid!{SL z$GP;>Dz-_U=zg-D=M}Id-qsnhRd1nm#*1}1OJ1p2)+^&fJnN*jZ(#)=w}5n;zS;7K zR(RKaK$ol7C?;5@+VP0uhPsYHFlF{>)s%Sf56$HMSKio~>Cmz%Ke&EfxJ`(z1`75u^(TP?Irjn?Pm>o^0R0zzK79Y9A<3-{?IP`#(xJXKPUdmwEIjXt@g zL(z9l#V-P(UxDV>yqW&(2t-*N7A5wQtPcky6;9wdtv+2NAx z7N}o74|^gREi8H*L5hsPvmJ~rmR49GncrlT4UMjhDD}_~q{L(|yAKc-QDHu#i8M3+ zV4XvMy7k?BJrFm#h8@*;kp3q#LdY({GJKna=yh*cw+K_T!)cOM9)+5{C1F2oWbQ_-p@SIe4kgrBiOiCt_+FWQeQ${C6;XZs&rrnZoCV>pyGbleC2 z@d?`bR|ea6OpTsHBKsy5cMpOp|AnfhoM2gL)I-+?=Ji#bv(eI5`)T%gr8Yd897l2c zVWePY@kbb8_Ugb2?wR~u#`@P{IWVeu^@O;m#|g`1%Tq?Go0k2|c|V5mlo4JprriD5 z?V^}zQgU|cOG0MBt3xaJsCu5cupZ;YD~ zrt+>xZXpFT(h=Wt`X3FhJT0e=K2=jHs-a3P+lLkNzQFw!aOlR>h` 
zJ{a%2{&=xfw{5w$p>wrCOX8`EFT@BBiMmZ|6FV#wKH&aAM}w8yL;FT5K5{1>d7~R! zlW*jyBy-O}<*jz0yK`ZHuJoZyytC2A5*C;aXCn3!31SPusK2+(7s(LY$Lo{g>JjX+ zlFLQDG`i^NQi#kYWv<{~olyD>$UJHvsw9)Cb>F`*5L}0}vHVF9f*23s2*Hpq$BZ|; zOkr*eKqXhI$qK>Jbm_gEn18HK9cFg$Lzc&H6!kcGRYKCujF_X>V-e@pj!xQjG<6z@ z88coyV`%BU6P32pr9>eyRBNu>lCP&*gmH1+`U~$p06W)(9g1k}h^y`ggDfB%U~0@6 z>pG3mZmFQY--~}GNERPnwBse#CTG7SaZ@uPEZ6ad{TwB?O{HWK02Ev{V7n+CWw2^# z($Dy_$H^6VsG_hs8kOqr_qFu%B0qQTd7Bmdo^wnExdg@9UD6M}mvtgecu{6z*!C9tK>kBzvnW?&^ zemdk4>@**M@pW4S>@D?{LOw%(`7jHOS?oixv^OHQ38W!Nq9l63Y$Fh7wR!S4yxM{ zCCHH(dvC+nQ$-v}yx&+Xc*uq17il=!3Nw~AmIB))WXd*|NER;?j{u7C8&cjrEzgIE zF`}l(WErHBqaJ%`VV@d5m*MHc-`STfVm>1fJLxu2OCP;S*k_l3mGBi%-_x?315_Va zp90$M_dlMt(F2^nDc@-POFCbustAYb8lTTajC{SBfg(BkcG)QN+^_sV_{5qpLbg=X zBxFH6DwW3L;ASB#sI?M&%{o0qolV7)3NNd4`Yl{YhgQ?NhHLCAb5FxvHv?5 z^&^2Ut#w#^D8089eP)LIle07sAw?cx{H$NX8M4$1!(yyZ%n8ux_;1WEmyPr{tNV;k zuGFpZX1n7tNP_}EdmQlwfUXZEaP5(<9QZe;6-TMm%?{74vZM$co3|XB zrq=-C0%>_sI|e~*A*L7ZNB=UkFbR_HH##2qU6ZwT!m7q*O`Mv|J$2IQl+8Kyff zP{S1lvv~!*bX5ZvV}_VXho2sqww6|Onol}`c&ItpJcP=oqJX~6f>RkQp^5%CO1w!^ zVnEJm4l&x)6gPjcVH``_PsDPzIyn#3wKG3*^39TW{fu+N ztyFn^3%dV-Q5>jD8(Kr^Va2xEta3PX64drL7sMs4iybOmWbT^F7)a%)_$fsFb7c`b zw0-d#MwUvt2R}3Z$;2=VOoqTf(aq*_-7&DW0T@oqYXHkdzH6}oS`mo0b}EwMzD2lm zHNMsAJvt?%H10UK_ogJMk0HKt{A(JYVXN5n+rJ>vU1BH8H8>2ua>oQ;g1J;SCi!5& zWy+LbGH-2r7LfKeFnd?@{!x3=7k~%+U{)TAi%A!X$MX2^Z?x6!VVHwa2BtbjGZLk&71JJU7P#VV1Hk=IQ*#U-qKRrvN2OwoL%X;f#M0> zgjUE23WIeG0d3@tq1D!%K_eSzeF+bw3PA2kXOCXUI8a*^2>57EcU>h4`cbaE&havQ zT$mD&O3HbHbXoVHPzc{;jBe@Bq6^}Tn4HNVV}BaNQJCNMsZ6)`m9~s$!|+YDb32}B zoKJs^nQz3hcu%^dL`IW$zLiXexD6RTh_Ha~TzFiI zKvTs-#&2n?AUO*Aj4-6#4M#k4H$V`>JT9g(NtbV0QNa4{1lbUygmg3yO-w?1N%jcJ zFe6p4s*W4C7LbmtYsK@-=nczQxZG~Pm{$OEyS&NeF}wipb#wq)uIFta$>$w(K7bjo zwq!c4TBr)qoKXrVzC;UXPZMK|%DOv%%WyKlGka&ewK0HUidWNG z&EHta<=b(eQaTPIG%!N~_o>-@gQJApDz6<(y$_%;PbyIPKnmnL^%qy6&QzA_#p110 zmvIOMYcCiYmR2TmKKzw>4tjrfZqk5GDgHTF&)Dg}$4b-M27+n}>e7e{=i!Ks26KIZ z1-5+nEl$#z-g^??9SNWH1yYZMXP$0v0V}_iF~D75;v8tP3wCy4H(8zj(P2&JG~_+x 
zbC-rdWME@S7sKAs9=Y?IF{$Rau3;ir#E5-vAkr~F(s__u^n+)2L&|YT`8#-v>bk|N z;u+{Or2qz=>-GR!@1;>dGCKX+^?fQVa-i2zcP`!T?*}f2-<45cjNr)}a<6fQ(YPZM zCrKRJK)brf-6KXdjDdtVfhSVkYj0$4aA9wGzHfV%?~csMCz{7N#wXYdjsGz#K_!gz zw9+S31Zu0-UZXE_=&v82x^Z^3j3sK+IO|-F$*@k9alW8MmQ%pHiKpNKIbxM906WJd@HQafdW_WJs+9l(o*ilQKp-m%3LeHlTOe^r|SI#p!@vRAoaF6Xr}XF@-_;iy>KGbs7xAeW6X? z3x{4P>fK{ZUQe96Nl(AVnDa+z8zh>4gYY^Ng6@mqDW|8Ku_IQl9gt-6@YQ&N7UY%^{ajv&LwQ|9a@)o&t| zzoV9NLdA3TRQR&|7RWz+0tG~?D42Af{jWrE_7>o;egUutbm#Q|O;7JH0Y^Z6JncyF zv0D#j;|5Q=trSUGhc2sbu-XwleErn`Az@U2oysb#I$45 zE-?r{3wR6*j&zA!@ojxfe(33+l1hQvytcgPC90}~&oWl_3%7}7y^Y&M^(Ivcw0!7?v{p_-KrZVM)H#p~k8h`swJFTA8|T)?$;tN%M)l6tSB$hX(3L!)7H>-a z@cO>Y8X?{)wJ_BWdPYckafr1r;;Z3CNU-oIinE{c!T_DrC3eqB4y8g_`(9Se4u5u9 zzvoKLoUPUD?h$-q`HesdMEvRxh)l4R7qOm|@=vnABvFUcfz_^GSjFnL!@>s9m}jD6 z&Z@k$m|lN8-dZ`u+VR41^JT15d%mO(W0`oPL=1NS7KXLwH6#z5UU$eHmn!-kGs^B z1gXAI+>Y|BWw9O_psD`X5LO1^%xQvH2FD-oNCT|H1l2?X zyex--UIG7x0`(^vW8Y^`#Z{1-1$f+(f)-LWFzb*}4a*hm(rvKf`UQkhBnw_%Y9b-3 zVclWLaxEZo^86QeCqvO@ko&TADM%+Xoiov9Fp-D*2bj2kUjOL|Hq-UORno;!ns_KxGB{)!{`;-pZLdxj_A2fW zYECvr+CS?C@PGjQ9s&6AFWg_YmTox@fERKPy&brCpoK7fBErl(ZV#~D@UFN4T+Vtl zAxEPhdW1Jw2Lb-`-N2e7>pj3w7~C-{TW0Cpd9=JMgv3tqDVUvy(7N(}lTLRG8%$AR+agc@ouAy@cL_e&3$D+fP# z4GVzif&}|>v^f9UmLBNsxetQOFM&nk`Ep=DUAS>7=?>gKXr>63LGNGff*2sYlkIW9v7zPqiw1Xdr6F}A~Q8uPzP6Iu=s0+h_A z+ys^Q3vmEMY$j<11@rI6gMyNO6<>n(!&fpG3#rs4*m-~Ms40ZfQQG{{KQK{+z*etI zLC0Q#OxjLTp&Hf;gv5M{5u?K-@7G~C`KP$p_Gy$sZ38dB}?rYpG#0^niWGUg!flC?nl3xb=RE==DOtaJcD>g%2@hb(DZG2Jjy)HH)(p( zI1bkou}0cMuLgE5PK-4MtDiVxcNrLKX;*z>QdZ+1m>87f+f1SAzv;d-zfNq=G7%Kk zTgJ5#$UCFh{zG9b8XYh<-FLU#o6~kgenA;`e%t@P8^Hmqvd?R!UHi2qcw^s$*ihW- zc!W#eeI`1W=FwOkF6j;Yp%Aa3^f9%rGk2gw-fm^z20Z zQu@qXKz6c(0TxYlG<{EW_gBqF3tx9B%80n*idr$cNC75ep}6u_)C!a+H9mYi@id{* zVd6x-=jKV7l^<2g%F`3nTbDCTWbBYTs6UYYJ-<|p4}aO@D4AC?_Q<~Ei^Vx`($us( zPy^X14;K1rENaT7kxGul8JmOr2=9t9th{T`G9j zKQk5K#kcPaD@v!Yib2>V-m$YCg6D~Xl568Z^t)Xffxh>|;kXPMdNhet`oPCB5{ki; ziNxGw+!b|bZ78cjzMu(yrAW6OX}usbXP^4(p4*5LS>E`y-@v&o3CT$@jQKE^& 
zenoMI4&qKn7@D@N|1TwpZ8WKVc{Rr1H|ELPjF^twgL9Lp;#Zp|Kh;$9n(e7({@KG>jR$Q!d^s-&*ZL^XT+%TsWla#FBw>o21JCt3IER-fdRc- zu1${UCSHl+xb*06g$tRoT2^Y^2>lb;o2ihDic)VDdHcLINbF!7??slJA&T)+LXCv> z#0I((53WRDF4=qtZ0s8m@$&r3*LRNvyr-9o`xJN<5y;s_!AzMv5h^T{A*~o^lqwm( zMDHjaaFNs_MQbbRUsa`}%=b6$MF*zHVN`mr_*=}g4w{57wifPHy|=i0m~S=~=2%C; z3wo4he9QwS)$MPk2eB_pd`3-D5}Z2*@R83c=H0J<69AN=p5-VYR~T3q_7ghY|DWsV z5%5EHZ90dN4m2K1^M0VnTpJ2lX5kl9JNA6ks1Ce5p5?oz^{sgC$rWZD2D~@_=hn|& z6}Ypm?s4A&gNM;Ju`I*pmzwo5XY$=G(Oy`oP9=C*?LWw2@v!HfcB_KIm`8F)Sih6X zfUTeO>bD7S_28I>G-b~=Pn!kDSFO6ya~R|<_!o-4iYL8C@#i*r9D_{f4mO_qGvuWE ztKN~-U-}g{x^J0^io(A1!~`EsG==B7(v(9kcvulsT=&2C2n(+bIG@1pP0{tltKBFK z70Fyj09pc2?F2Xi`4d1_?vK=WVaDO6JS;LM(QU=d)Y>( z-~nWC3NAcpw?nvVMu zk>rSvOf->+k5pRaE>l>0t5}(1;~sgjS*SI73KR<%IVEXKWpSEHIH56&K^K+W%d-8hQ*AUdj zLV|B~Ede~g@!tl!bH#@b?v*rg<$rXI{dHxSWBW#Fd8+QP#29ndmmR%a3b)PI7UB$# zFo?I(vE47(k;nzo2+aEXw<7y=GOJUDte3@SF=EZFM!al+I z3L?Pbg>WuTSF$2JrRLPqdQ`hCFP}o&)v%2~qEY^j1)I38g;Y3Z1GD^a0>!Yb1f%tp zWHmDKZrrTCYKTovSU$8!&!D8TS|WXcoVOf~tE9RWb0kEht_6-$OCM640ACty-{)VP}T{HR$BaM&}eNOZqM*k#BWMQ7b(iiz@>_GC;(;1%$0 zhKK2yKER6I5vJ|>vj$FC40G6&|a^qC>35@p3(9_^vMi6adO*>XJaYcqs3YTRng?%{q%&A@eX2ZP=C>zp^HO` z7oKns!0!5NCYW3?F!EA?Aqf-egT8(l*trYi$bJn*AO-X=b(-q-OjfcOu=%if%^j>7 zpe0mGZD89O5nt#!3;S@bEj%+CVK{_)0SG>=>iICi_Fp<8TIlKZjLqGizdco zO+rEPSih>cOZ^(ODM>;9lm^4{ccVTNZOe7za>Jfz*alhKg|4h|FI!1J6svXYV;8d> za4(OjQaY7m*!_p5pb4Ki{ctGaRQn)Bl&R?% zrCUs$FTydDEhx9hmX)Nj(!#OB)$O0c>vEdkJo1GAaYt~e$Mz6Ismor|Brs^phxWLT z0@$kqjiq8>ve-PlOCUDyQWe1_Gb=f#we3$4;P}z1e#l_^)$m`q8mWn|3xZ zH+CsyPk7l*expI45eO|$Zpxnii{YPRlh%2`^k3wmYjf3f>(2_WJjtapgWI&+tmKcQ ztt_`37x=ZZND(6HG;svGc(%R@wZX4|vJr?FM#%f(JLGG}xMVU!<5!ORKLN1_ta&6t z7==2sUvlx|h};)SN+Qfugv`MlMg1B-hAS3|tLx!H?G?vnjJLDAN9b<4LCYE|yGlL> zt;=lpw*jWS3L2*B_iEb;bs49^Rq46LR?lHApljPxch^TEWGQ+V$J2sBPPGN`%Yx(v&g8Q z(#ma9083z`U6NHLWvVZ^1~A-3^6vF8Z-GZKpU&%L>wHOX%Ey59Kj0tAFaVk07TLjG z;3bLsV_uU$TIjK~;s?M<;4Rw^B&q6K*8rFrjC1e$dF^LC7N>!a8aV(d_HE#&C86WF z$JYB&e<(2hsylW8uncIZcvj?1E(psx^erbMoe0@WS@^p~tbU+wdFPmNXiT-sZSfrF 
zaXaA%c->7V90)%g%W=G=AAO`NaeZPV!gO#-P3p|opT&4T{+#fc2hi-AU5b5CHb-&B zy>FOuv}tl?Wp37W!(;9sE?RZ``y0ch5Wn;P5~SpF$?Tsa&w}A%OM?ih1~WR3z~1#G z0hUzl2iuD|kSn1I!egUS%BPXWkPK90GaY&v9J1~feYFe9Q}H|S?Y$8Y&W89J5hS^7 zPLUXKf@`r8fyJgYrTzb7NV#bypA0FA=aV6MZTt^IB3`PszS7VJwO?&3S4cyDRBZywemjU(fw>nc$QVO03}n{caDr&6=vNl_S~eGp4klZ zMdWdXxUupa<{s3b#gfU|s2X}lA^QR^q&ljX?yeaeckSg38!AOXX4moB)FI(TvDD>< zqkic6+Pq(fw{1h$zo2&~n;BkpTu(m&&zJ?`tQNi<1a=HM?w0cbtNcud!1=E5CogsY zSfkhG+i@)P2I0~Q!C50#g$?I*8B2D>+go8dCh)4}^FXoS|y zowXL%{Kl&|%n%(r_B8U$0~^%7$rDD<>U{!}8>~%X9#9L{;pqX9_FEDe3O~6DF(lck zjs#R?0wz;2#RX6Fes+rf?ue(RG8mL9W>v*i3NrQS@jYR5c(fiuSHJPCk}MtTVls{o zdzIQUxqO%qXjW`vfadJ)YI)1~OhvlpoChSe2Ex~a7=YTM&`w}^bLSHjG^Ld;e)LR^ zC%|~^h8IMCgm794NZlT#T1;NcuGedp{QkxEFFl+eH7>^M}9nozUGVD!QDH^U&|6cax{)C0s1fu?PNhhw8M_r+YJ5Pf07=!5855j!= z+%=_Z-w(2$`CPb=yL4GhYQr*o3p(Hm_Vw8~#N08(btbBS?_z8bJhFQ7E{;X=+#u;60Q08TU z0q(>*lbfw!Y2TM;py*>(^$7Uvq^kx1l-Q^%zCLF$bdQ{TDytyM=;s{;2}F$9!l%Kh zpe>jxj(v-5(t~)FS?LcNmo%sNqjWDxcpO{_YLe9MR+3pqr#`Xkl#+V2Ylld<6Fp^z{DK8tHWi_K$rp(or8S zr&+T^whpiQ_0^G`N!q*p=y<3w&h1!wxYS4g-r_xxxzL~G$nvn|Iz|wq2N)M(wVpS) zV%HQOTKHXtXl)rECV;^|v5s|wIY_VxYk%Wh(P z&4DT2+TnEEztSYOdik(PF%GBWf!gjuMR-YajX4|z-CUK-4-$uDp(pc+bO%Lk>;yQDiyIPf8KZ#$=rnMEI~j{aSj-5{SR5`tM`L~I9&MNi_>tbfpe zPr2*~f8T1KmbQO4p70Z*vR9Wu7-3H8%TSRE`CO9wFh$l7YRr=nFKQLlWNjlFZ4Yr? z&l@}})T~u7ePr~a)t$?tjAM`|{5u=*gU_wlCfWdoA(n>L&ucCG7J5bJS4g@gt8ZAe zOudk?5kXT$3hI-H)cBCgedzi0k;!bu-}lQj+fWhT(eu$m&aRvplryY6zS9=EZ_^@T z4QHgeiAfx?Damh6e}EWn;rNkcvE9(~_ohF62E`=l3!~Al*eveu&zMY@3S|Z*G4VA_ z492VxCo3d#_58zse{h>YqeT>vq%Kn7n>9814k()?n4@rNdNC~$o9$Dxq_~Q-S=AF&ncN7dt1QJu5Rj!9CMPqyeU0&f}Pf=GU{;-Dt)w zBWz0mdfgvKRX;)K6qd|lZM{D~t<;(D6B!tUIIa>l!i4v3O+Cf&33*r5H1-A$Ed8)U z@aF`vC67_5*+qg!q%2p?=tCJJ+#H_~CP|QS-E!JoNrPj_Yh)1?ze^Kxt(MINN3Y&m zM0rGPo1<>n24VK|Wtt&aMqSmz5Dc^nZV<(%I5?pBc0FYzv1e8M?2o{^joP(=uwTP_ zHI63yP*5%8pdH;sR8pOk+hQM5Wdn(p)(Y{)&;@O4M?q5SW-|@ygtWPekL0uM5et{8 z;SRV(Tx<$Dm0Q8mU9&`Nb3)TSnp8c4${$WTuTyaP%H(GOl`P{MGgBI8#nwlSfLEX9M6xT)9y)wtN1;@y?+^FD3xoJ_! 
zD%T)?)qXki44IMUt??mjkmLTZLG&o{?xHOpk+&pvX6NUrpW6vP#9?$-VlVy#vf=0| zwWbO@@!-G2-`x})XX&#N1y^Pg5#pIIVFRuCYz^x4BkCY){0K@mq`zGl*npNWg< zT;B^nP!vKDM7o_6?~w0!tn`&K{|COGO62a|l9TpJTbFoK7LVy*=-q^om9iU$ipp_s zZi9PtzC84arbDLrhrce_%&h&!xZM(fMs2e5pKKb=f9%XLk1K6r+|5dCxia4{ zMWGZ8W88+?>TE`q#6M2hVDDj5YGDd>gIevk$6+6VC{DA+!}{wxpJ=pW&^Dfiewe}( zsKxCrY-6P-BuEKfG&2rPVsxN?v|sfW{N)~-G?BU-52_LlJUzq0UWu4FNtb8m6vFZRg|ukzt5tdj=u+-3sQ}dBx&#E=f0X4p zqYFsOZbOW`;az)?MmE%-@h4`E7-=MxBM2{VBuo|iLtk(*jxD03vscR zL*1a5z2=EyL(Ev+&T*=w#|FgejD8R!Z-8cVU)&IAG|K7*@=`jcNGyGL5C1HaQM84z zrKmzX*QT2eN}R(Pf2Qc{!lm1-Yjpa_6nk&)ElIt$Fgek-@*eEOP>~C1E=#f|UaP>U z=UvF-OFytjW@kJ*Bz*;}G0o?4EANRtMo(fX9#S|&Y_`3dSB z%JeiHL#@fXP!KExOzdxd@h+;+g~!rZ+$da_8q@Y7tct`edtdub^o*ctWh82vyX%#5agu!08FzyOA}5;FoqAksi~PrAmr|%&zbgcrO{5ZZGF_$supu zlmGc^rNqu}lqnvGRzU{?Sr+rAnf_oDrA#B6lGXz^p%>qP;YHiX35l|0M&jQ?sb@LC zP-*+uUxrls7GijMHerbtH9G4OHmTO)Uj3?C-?cZl7&}^L@?dZ)>XTVtDtz4bd@>#K zb70o3c*vu7$TvO?U)jzdz=+)+k2z%iZx_(-dFbm3D{>-gC2j8>-g z^Z16h7K++C5er8Lds>I!eoppscJX?{ZWNf`ZJ1D|tB}NlDbh>D%GqY&s9YHGb->daOw6TN4*4=Bp?|T1v4+TCYiRP#f*v zPkwc8F~!L$;Ga%H=g_xf2QT!`6N=q#SUoCGG$d-z=&FLcrNvWZ&~u&d z8y>bW%^~i;9R7k|c)Z|5>(MKto(%|f;b93U$|%9b&DtH3XEGdLblw1_UhxXy*`3uw zeYEj#2Y&&)W=cpyu~N8jNG7Kh8K*6G`ksbtDMIa1zFc`oUG)Wu;O42_#&7wKa8S{^ zNEL7An1AIV1&UdFO`K4!{w^_VkjYcTLi8A&sEqy2QT0N9OK~}Z#|poS zA2g!&+qwCprqC`fn&gBp4oZ(+*vXcdW#xE_pP_KBm$7yppN{fG=JHy#QmTr~YNv<_ zi%Md<$e;Vnn(sj*VcMv8ng3h&i`uVA77_A?nxXCrsUA;&OjIW`f?7O4%*c&kl8|$)~T++DaX?^6GFZ~tUtP-zQ*}`fIuqh|( z{QU$!gC= 1.18 +- Most Kubernetes distributions are supported +- Recent Ubuntu, SLES, CentOS or RHEL compute nodes connected to their respective official package repositories +- Helm 3 (Version >= 3.2.0 required) + +Depending on which [Container Storage Provider](https://scod.hpedev.io/container_storage_provider/index.html) (CSP) is being used, other prerequisites and requirements may apply, such as storage platform OS and features. 
+ +- [HPE Alletra 6000 and Nimble Storage](https://scod.hpedev.io/container_storage_provider/hpe_nimble_storage/index.html) +- [HPE Alletra 9000, Primera and 3PAR](https://scod.hpedev.io/container_storage_provider/hpe_3par_primera/index.html) + +## Configuration and installation + +The following table lists the configurable parameters of the chart and their default values. + +| Parameter | Description | Default | +|---------------------------|------------------------------------------------------------------------|------------------| +| disable.nimble | Disable HPE Nimble Storage CSP `Service`. | false | +| disable.primera | Disable HPE Primera (and 3PAR) CSP `Service`. | false | +| disable.alletra6000 | Disable HPE Alletra 6000 CSP `Service`. | false | +| disable.alletra9000 | Disable HPE Alletra 9000 CSP `Service`. | false | +| disableNodeConformance | Disable automatic installation of iSCSI/Multipath Packages. | false | +| disableNodeGetVolumeStats | Disable NodeGetVolumeStats call to CSI driver. | false | +| imagePullPolicy | Image pull policy (`Always`, `IfNotPresent`, `Never`). | IfNotPresent | +| iscsi.chapUser | Username for iSCSI CHAP authentication. | "" | +| iscsi.chapPassword | Password for iSCSI CHAP authentication. | "" | +| logLevel | Log level. Can be one of `info`, `debug`, `trace`, `warn` and `error`. | info | +| registry | Registry to pull HPE CSI Driver container images from. | quay.io | +| kubeletRootDir | The kubelet root directory path. | /var/lib/kubelet | + +It's recommended to create a [values.yaml](https://github.com/hpe-storage/co-deployments/blob/master/helm/values/csi-driver) file from the corresponding release of the chart and edit it to fit the environment the chart is being deployed to. Download and edit [a sample file](https://github.com/hpe-storage/co-deployments/blob/master/helm/values/csi-driver). 
+ +These are the bare minimum required parameters for a successful deployment to an iSCSI environment if CHAP authentication is required. + +``` +iscsi: + chapUser: "" + chapPassword: "" +``` + +Tweak any additional parameters to suit the environment or as prescribed by HPE. + +### Installing the chart + +To install the chart with the name `my-hpe-csi-driver`: + +Add HPE helm repo: + +``` +helm repo add hpe-storage https://hpe-storage.github.io/co-deployments/ +helm repo update +``` + +Install the latest chart: + +``` +kubectl create ns hpe-storage +helm install my-hpe-csi-driver hpe-storage/hpe-csi-driver -n hpe-storage -f myvalues.yaml +``` + +**Note**: `myvalues.yaml` is optional if no parameters are overridden from defaults. Also pay attention to what the latest version of the chart is. If it's labeled with `prerelease` and a "beta" tag, add `--version X.Y.Z` to install a "stable" chart. + +### Upgrading the chart + +Due to the [helm limitation](https://helm.sh/docs/chart_best_practices/custom_resource_definitions/#some-caveats-and-explanations) to not support upgrade of CRDs between different chart versions, helm chart upgrade is not supported. +Our recommendation is to uninstall the existing chart and install the chart with the desired version. CRDs will be preserved between uninstall and install. + +#### Upgrading 2.0.0 to 2.1.0 + +Before version 2.0.0 is uninstalled, the following CRDs needs to be updated. + +**Important:** If there are HPE Alletra 9000, Primera or 3PAR Remote Copy Groups configured on the cluster, follow the [next steps](#update-rcg-info) before uninstallation. + +##### Update RCG Info + +This step is only necessary if there are HPE Alletra 9000, Primera or 3PAR Remote Copy Groups configured on the cluster. If there are none, proceed to the [next step](#update-crds). + +Change kubectl context into the Namespace where the HPE CSI Driver is installed. The most common is "hpe-storage". 
+ +``` +kubectl config set-context --current --namespace=hpe-storage +``` + +Create the Job using the below commands, which will modify the "rcg-info" record to the new key "RCGCreatedByCSP". + +``` +kubectl apply -f https://raw.githubusercontent.com/hpe-storage/co-deployments/master/yaml/rcg-info/v1.0.0/convert-rcg-info.yaml +``` + +Completion of job status can be verified using the below command. + +``` +kubectl wait --for=condition=complete --timeout=600s job/primera3par-rcg-info +``` + +Continue to [update the CRDs](#update-crds) followed by [uninstalling the chart](#uninstalling-the-chart). + +##### Update CRDs + +Before reinstallation of the driver, apply the new CRDs. + +``` +kubectl apply -f https://raw.githubusercontent.com/hpe-storage/co-deployments/master/helm/charts/hpe-csi-driver/crds/hpevolumeinfos_v2_crd.yaml +kubectl apply -f https://raw.githubusercontent.com/hpe-storage/co-deployments/master/helm/charts/hpe-csi-driver/crds/hpevolumegroupinfos_v2_crd.yaml +kubectl apply -f https://raw.githubusercontent.com/hpe-storage/co-deployments/master/helm/charts/hpe-csi-driver/crds/snapshotgroupinfos_v2_crd.yaml +kubectl apply -f https://raw.githubusercontent.com/hpe-storage/co-deployments/master/helm/charts/hpe-csi-driver/crds/hpereplicated_deviceinfo_v2_crd.yaml +``` + +#### Uninstalling the chart + +To uninstall the `my-hpe-csi-driver` chart: + +``` +helm uninstall my-hpe-csi-driver -n hpe-storage +``` + +**Note**: Due to a limitation in Helm, CRDs are not deleted as part of the chart uninstall. + +### Alternative install method + +In some cases it's more practical to provide the local configuration via the `helm` CLI directly. Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. These will take precedence over entries in [values.yaml](https://github.com/hpe-storage/co-deployments/blob/master/helm/values/csi-driver). 
For example: + +``` +helm install my-hpe-csi-driver hpe-storage/hpe-csi-driver -n hpe-storage \ + --set iscsi.chapUser=admin \ + --set iscsi.chapPassword=xxxxxxxx +``` + +## Using persistent storage with Kubernetes + +Enable dynamic provisioning of persistent storage by creating a `StorageClass` API object that references a `Secret` which maps to a supported HPE primary storage backend. Refer to the [HPE CSI Driver for Kubernetes](https://scod.hpedev.io/csi_driver/deployment.html#add_a_hpe_storage_backend) documentation on [HPE Storage Container Orchestration Documentation](https://scod.hpedev.io/). Also, it's helpful to be familiar with [persistent storage concepts](https://kubernetes.io/docs/concepts/storage/volumes/) in Kubernetes prior to deploying stateful workloads. + +## Support + +The HPE CSI Driver for Kubernetes Helm chart is fully supported by HPE. + +Formal support statements for each HPE supported CSP are [available on SCOD](https://scod.hpedev.io/legal/support). Use this facility for formal support of your HPE storage products, including the Helm chart. + +## Community + +Please file any issues, questions or feature requests you may have [here](https://github.com/hpe-storage/co-deployments/issues) (do not use this facility for support inquiries of your HPE storage product, see [SCOD](https://scod.hpedev.io/legal/support) for support). You may also join our Slack community to chat with HPE folks close to this project. We hang out in `#NimbleStorage`, `#3par-primera`, and `#Kubernetes`. Sign up at [slack.hpedev.io](https://slack.hpedev.io/) and login at [hpedev.slack.com](https://hpedev.slack.com/) + +## Contributing + +We value all feedback and contributions. If you find any issues or want to contribute, please feel free to open an issue or file a PR. More details in [CONTRIBUTING.md](https://github.com/hpe-storage/co-deployments/blob/master/CONTRIBUTING.md) + +## License + +This is open source software licensed using the Apache License 2.0. 
Please see [LICENSE](https://github.com/hpe-storage/co-deployments/blob/master/LICENSE) for details. diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/app-readme.md b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/app-readme.md new file mode 100644 index 000000000..29ca912cb --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/app-readme.md @@ -0,0 +1,3 @@ +# HPE CSI Driver for Kubernetes + +The [HPE CSI Driver for Kubernetes](https://github.com/hpe-storage/csi-driver) leverages HPE storage platforms to provide scalable and persistent storage for stateful applications. diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/hpe-nodeinfo-crd.yaml b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/hpe-nodeinfo-crd.yaml new file mode 100644 index 000000000..86c60cf34 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/hpe-nodeinfo-crd.yaml @@ -0,0 +1,70 @@ +--- +############################################# +############ HPE Node Info CRD ############ +############################################# +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: hpenodeinfos.storage.hpe.com +spec: + group: storage.hpe.com + names: + kind: HPENodeInfo + plural: hpenodeinfos + scope: Cluster + versions: + - name: v1 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. + storage: true + schema: + openAPIV3Schema: + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object." + type: string + kind: + description: "Kind is a string value representing the REST resource this object represents" + type: string + spec: + description: "spec defines the desired characteristics of a HPE nodeinfo requested by a user." 
+ properties: + chapPassword: + description: "The CHAP Password" + type: string + chapUser: + description: "The CHAP User Name" + type: string + iqns: + description: "List of IQNs configured on the node." + items: + type: string + type: array + networks: + description: "List of networks configured on the node." + items: + type: string + type: array + uuid: + description: "The UUID of the node." + type: string + wwpns: + description: "List of WWPNs configured on the node." + items: + type: string + type: array + required: + - uuid + - networks + type: object + required: + - spec + type: object +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/hpereplicated_deviceinfo_v2_crd.yaml b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/hpereplicated_deviceinfo_v2_crd.yaml new file mode 100644 index 000000000..b95a9eef9 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/hpereplicated_deviceinfo_v2_crd.yaml @@ -0,0 +1,115 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: hpereplicationdeviceinfos.storage.hpe.com +spec: + group: storage.hpe.com + names: + kind: HPEReplicationDeviceInfo + plural: hpereplicationdeviceinfos + shortNames: + - hperdi + + scope: Cluster + versions: + - name: v1 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. 
+ storage: false + schema: + openAPIV3Schema: + type: object + #x-kubernetes-preserve-unknown-fields: true + properties: + hpeReplicationDeviceInfos: + description: List of HPE Replicated Device Information + type: object + items: + type: object + properties: + targets: + description: List of Target Array Details + type: object + items: + description: Target Array Details + type: object + properties: + targetName: + description: Target Name of the array + type: string + targetCpg: + description: Target CPG of the array + type: string + targetSnapCpg: + description: Target Snap CPG of the array + type: string + targetSecret: + description: Secret of the replicated array + type: string + targetMode: + description: Replication Mode + type: string + targetSecretNamespace: + description: Namespace of secret + type: string + required: + - targetName + - targetCpg + - targetSecret + - targetSecretNamespace + - name: v2 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. 
+ storage: true + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + properties: + hpeReplicationDeviceInfos: + description: List of HPE Replicated Device Information + type: object + items: + type: object + properties: + targets: + description: List of Target Array Details + type: object + items: + description: Target Array Details + type: object + properties: + targetName: + description: Target Name of the array + type: string + targetCpg: + description: Target CPG of the array + type: string + targetSnapCpg: + description: Target Snap CPG of the array + type: string + targetSecret: + description: Secret of the replicated array + type: string + targetMode: + description: Replication Mode + type: string + targetSecretNamespace: + description: Namespace of secret + type: string + required: + - targetName + - targetCpg + - targetSecret + - targetSecretNamespace +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + + diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/hpevolumegroupinfos_v2_crd.yaml b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/hpevolumegroupinfos_v2_crd.yaml new file mode 100644 index 000000000..2803998be --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/hpevolumegroupinfos_v2_crd.yaml @@ -0,0 +1,124 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: hpevolumegroupinfos.storage.hpe.com +spec: + group: storage.hpe.com + names: + kind: HPEVolumeGroupInfo + plural: hpevolumegroupinfos + shortNames: + - hpevgi + scope: Cluster + versions: + - name: v1 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. 
+ storage: false + schema: + openAPIV3Schema: + type: object + #x-kubernetes-preserve-unknown-fields: true + properties: + hpeVolumeGroupInfos: + description: List of HPE volume groups configured for 3PAR/Primera arrays. + items: + type: object + properties: + uuid: + description: The UUID of the node. + type: string + + record: + description: Metadata for the volume group + type: object + + snapshotGroups: + description: Snapshot groups that are linked to this volume group + items: + type: object + properties: + id: + description: ID of the snapshot group + type: string + + name: + description: Name of the snapshot group + type: string + type: object + volumes: + description: Volumes that are members in this volume group + + items: + type: object + properties: + volumeId: + description: ID of the member volume + type: string + + volumeName: + description: Name of the member volume + type: string + type: object + type: object + - name: v2 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. + storage: true + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + properties: + hpeVolumeGroupInfos: + description: List of HPE volume groups configured for 3PAR/Primera arrays. + items: + type: object + properties: + uuid: + description: The UUID of the node. 
+ type: string + + record: + description: Metadata for the volume group + type: object + + snapshotGroups: + description: Snapshot groups that are linked to this volume group + items: + type: object + properties: + id: + description: ID of the snapshot group + type: string + + name: + description: Name of the snapshot group + type: string + type: object + volumes: + description: Volumes that are members in this volume group + + items: + type: object + properties: + volumeId: + description: ID of the member volume + type: string + + volumeName: + description: Name of the member volume + type: string + type: object + type: object + +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/hpevolumeinfos_v2_crd.yaml b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/hpevolumeinfos_v2_crd.yaml new file mode 100644 index 000000000..7f995a724 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/hpevolumeinfos_v2_crd.yaml @@ -0,0 +1,68 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: hpevolumeinfos.storage.hpe.com +spec: + group: storage.hpe.com + names: + kind: HPEVolumeInfo + plural: hpevolumeinfos + scope: Cluster + # list of versions supported by this CustomResourceDefinition + versions: + - name: v1 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. + storage: false + schema: + openAPIV3Schema: + type: object + #x-kubernetes-preserve-unknown-fields: true + properties: + hpeVolumes: + description: List of HPE volumes configured for 3PAR/Primera arrays. + type: object + items: + type: object + properties: + uuid: + description: The UUID of the node. + type: string + + record: + description: Metadata for the volume + type: object + - name: v2 + # Each version can be enabled/disabled by Served flag. 
+ served: true + # One and only one version must be marked as the storage version. + storage: true + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + + properties: + hpeVolumes: + description: List of HPE volumes configured for 3PAR/Primera arrays. + type: object + items: + type: object + properties: + uuid: + description: The UUID of the node. + type: string + + record: + description: Metadata for the volume + type: object + +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/snapshotgroupinfos_v2_crd.yaml b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/snapshotgroupinfos_v2_crd.yaml new file mode 100644 index 000000000..6fc351999 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/snapshotgroupinfos_v2_crd.yaml @@ -0,0 +1,112 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + creationTimestamp: null + name: hpesnapshotgroupinfos.storage.hpe.com +spec: + group: storage.hpe.com + names: + kind: HPESnapshotGroupInfo + plural: hpesnapshotgroupinfos + shortNames: + - hpesgi + scope: Cluster + versions: + - name: v1 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. + storage: false + schema: + openAPIV3Schema: + type: object + #x-kubernetes-preserve-unknown-fields: true + properties: + hpeSnapshotGroupInfos: + description: List of HPE snapshot groups created for 3PAR/Primera arrays. + type: object + items: + type: object + properties: + uuid: + description: The UUID of the node. 
+ type: string + + record: + description: Metadata for the volume group + type: object + + snapshotVolumes: + description: Snapshot volumes that are part of this snapshot group + type: object + items: + type: object + properties: + srcVolumeId: + description: ID of the volume that is the source of this snapshot volume + type: string + + srcVolumeName: + description: Name of the volume that is the source of this snapshot volume + type: string + + snapshotId: + description: Snapshot volume Id + type: string + + snapshotName: + description: Snapshot volume name + type: string + - name: v2 + # Each version can be enabled/disabled by Served flag. + served: true + # One and only one version must be marked as the storage version. + storage: true + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + properties: + hpeSnapshotGroupInfos: + description: List of HPE snapshot groups created for 3PAR/Primera arrays. + type: object + items: + type: object + properties: + uuid: + description: The UUID of the node. 
+ type: string + + record: + description: Metadata for the volume group + type: object + + snapshotVolumes: + description: Snapshot volumes that are part of this snapshot group + type: object + items: + type: object + properties: + srcVolumeId: + description: ID of the volume that is the source of this snapshot volume + type: string + + srcVolumeName: + description: Name of the volume that is the source of this snapshot volume + type: string + + snapshotId: + description: Snapshot volume Id + type: string + + snapshotName: + description: Snapshot volume name + type: string + +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] + diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_snapshotgroupclasses.yaml b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_snapshotgroupclasses.yaml new file mode 100644 index 000000000..b58878471 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_snapshotgroupclasses.yaml @@ -0,0 +1,60 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: snapshotgroupclasses.storage.hpe.com +spec: + conversion: + strategy: None + group: storage.hpe.com + names: + kind: SnapshotGroupClass + listKind: SnapshotGroupClassList + plural: snapshotgroupclasses + singular: snapshotgroupclass + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: SnapshotGroupClass specifies parameters that a underlying + storage system uses when creating a volumegroup snapshot. A specific SnapshotGroupClass + is used by specifying its name in a VolumeGroupSnapshot object. SnapshotGroupClasses + are non-namespaced + properties: + apiVersion: + description: APIVersion defines the versioned schema of this representation + of an object. 
+ type: string + deletionPolicy: + description: deletionPolicy determines whether a SnapshotGroupContent + created through the SnapshotGroupClass should be deleted when its + bound SnapshotGroup is deleted. Supported values are "Retain" and + "Delete". "Retain" means that the SnapshotGroupContent and its physical + snapshotGroup on underlying storage system are kept. "Delete" means that + the SnapshotGroupContent and its physical snapshotGroup on underlying + storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + snapshotter: + description: snapshotter is the name of the storage driver that handles this + SnapshotGroupClass. Required. + type: string + kind: + description: Kind is a string value representing the REST resource + this object represents. + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshotGroups. These values are opaque to Kubernetes. 
+ type: object + required: + - deletionPolicy + - snapshotter + type: object + served: true + storage: true \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_snapshotgroupcontents.yaml b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_snapshotgroupcontents.yaml new file mode 100644 index 000000000..a7132c59e --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_snapshotgroupcontents.yaml @@ -0,0 +1,104 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: snapshotgroupcontents.storage.hpe.com +spec: + conversion: + strategy: None + group: storage.hpe.com + names: + kind: SnapshotGroupContent + listKind: SnapshotGroupContentList + plural: snapshotgroupcontents + singular: snapshotgroupcontent + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: SnapshotGroupContent represents the actual "on-disk" snapshotGroup + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents. Servers may infer this from the endpoint the + client submits requests to. Cannot be updated. In CamelCase. More + info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a SnapshotGroupContent created + by the underlying storage system. Required. 
+ properties: + deletionPolicy: + description: deletionPolicy determines whether this SnapshotGroupContent + and its physical snapshotgroup on the underlying storage system should + be deleted when its bound SnapshotGroup is deleted. Supported + values are "Retain" and "Delete". "Retain" means that the SnapshotGroupContent + and its physical snapshotGroup on underlying storage system are kept. + "Delete" means that the SnapshotGroupContent and its physical + snapshotGroup on underlying storage system are deleted. + Required. + enum: + - Delete + - Retain + type: string + source: + description: source specifies from where a snapshotGroup will be created.Required. + properties: + snapshotGroupHandle: + description: snapshotGroupHandle specifies the snapshotGroup Id + of a pre-existing snapshotGroup on the underlying storage system. + This field is immutable. + type: string + type: object + snapshotGroupClassName: + description: name of the SnapshotGroupClass to which this snapshotGroup belongs. + type: string + snapshotGroupRef: + description: snapshotGroupRef specifies the SnapshotGroup object + to which this SnapshotGroupContent object is bound. SnapshotGroup.Spec.SnapshotGroupContentName + field must reference to this SnapshotGroupContent's name for + the bidirectional binding to be valid. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + volumeSnapshotContentNames: + description: list of volumeSnapshotContentNames associated with this snapshotGroups + type: array + items: + type: string + required: + - deletionPolicy + - source + - snapshotGroupClassName + type: object + required: + - spec + type: object + served: true + storage: true \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_snapshotgroups.yaml b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_snapshotgroups.yaml new file mode 100644 index 000000000..3372a7db7 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_snapshotgroups.yaml @@ -0,0 +1,83 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: snapshotgroups.storage.hpe.com +spec: + conversion: + strategy: None + group: storage.hpe.com + names: + kind: SnapshotGroup + listKind: SnapshotGroupList + plural: snapshotgroups + singular: snapshotgroup + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: SnapshotGroup is a user's request for creating a snapshotgroup + properties: + apiVersion: + description: APIVersion defines the versioned schema of this representation + of an object. + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents' + type: string + spec: + description: spec defines the desired characteristics of a snapshotGroup + requested by a user. + Required. + properties: + source: + description: source specifies where a snapshotGroup will be created. + This field is immutable after creation. Required. 
+ properties: + kind: + description: kind of the source (VolumeGroup) is the only supported one. + type: string + apiGroup: + description: apiGroup of the source. Current supported is storage.hpe.com + type: string + name: + description: name specifies the volumeGroupName of the VolumeGroup object in the same namespace as the SnapshotGroup object where the snapshotGroup should be dynamically taken from. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the volumeSnapshotClass to create pre-provisioned snapshots + type: string + snapshotGroupClassName: + description: snapshotGroupClassName is the name of the SnapshotGroupClass requested by the SnapshotGroup. + type: string + snapshotGroupContentName: + description: snapshotGroupContentName is the name of the snapshotGroupContent the snapshotGroup is bound. + type: string + required: + - source + - volumeSnapshotClassName + - snapshotGroupClassName + type: object + status: + description: status represents the current information of a snapshotGroup. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time + snapshotGroup is taken by the underlying storage system. 
+ format: date-time + type: string + phase: + description: the state of the snapshotgroup + enum: + - Pending + - Ready + - Failed + type: string + type: object + required: + - spec + type: object + served: true + storage: true \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_volumegroupclasses.yaml b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_volumegroupclasses.yaml new file mode 100644 index 000000000..e201ec94e --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_volumegroupclasses.yaml @@ -0,0 +1,60 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: volumegroupclasses.storage.hpe.com +spec: + conversion: + strategy: None + group: storage.hpe.com + names: + kind: VolumeGroupClass + listKind: VolumeGroupClassList + plural: volumegroupclasses + singular: volumegroupclass + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: VolumeGroupClass specifies parameters that a underlying + storage system uses when creating a volumegroup. A specific VolumeGroupClass + is used by specifying its name in a VolumeGroup object. VolumeGroupClasses + are non-namespaced + properties: + apiVersion: + description: APIVersion defines the versioned schema of this representation + of an object. + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeGroupContent + created through the VolumeGroupClass should be deleted when its + bound VolumeGroup is deleted. Supported values are "Retain" and + "Delete". "Retain" means that the VolumeGroupContent and its physical + volumeGroup on underlying storage system are kept. "Delete" means that + the VolumeGroupContent and its physical volumeGroup on underlying + storage system are deleted. Required. 
+ enum: + - Delete + - Retain + type: string + provisioner: + description: provisioner is the name of the storage driver that handles this + VolumeGroupClass. Required. + type: string + kind: + description: Kind is a string value representing the REST resource + this object represents. + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating volumeGroups. These values are opaque to Kubernetes. + type: object + required: + - deletionPolicy + - provisioner + type: object + served: true + storage: true \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_volumegroupcontents.yaml b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_volumegroupcontents.yaml new file mode 100644 index 000000000..d944909eb --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_volumegroupcontents.yaml @@ -0,0 +1,96 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: volumegroupcontents.storage.hpe.com +spec: + conversion: + strategy: None + group: storage.hpe.com + names: + kind: VolumeGroupContent + listKind: VolumeGroupContentList + plural: volumegroupcontents + singular: volumegroupcontent + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: VolumeGroupContent represents the actual "on-disk" volumeGroup + object in the underlying storage system + properties: + apiVersion: + description: APIVersion defines the versioned schema of this representation + of an object. + type: string + kind: + description: Kind is a string value representing the REST resource + this object represents. + type: string + spec: + description: spec defines properties of a VolumeGroupContent created + by the underlying storage system. Required. 
+ properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeGroupContent + and its physical volumegroup on the underlying storage system should + be deleted when its bound VolumeGroup is deleted. Supported + values are "Retain" and "Delete". "Retain" means that the VolumeGroupContent + and its physical volumeGroup on underlying storage system are kept. + "Delete" means that the VolumeGroupContent and its physical + volumeGroup on underlying storage system are deleted. + Required. + enum: + - Delete + - Retain + type: string + source: + description: source specifies from where a volumeGroup will be created.Required. + properties: + volumeGroupHandle: + description: volumeGroupHandle specifies the volumeGroup Id + of a pre-existing volumeGroup on the underlying storage system. + This field is immutable. + type: string + type: object + volumeGroupClassName: + description: name of the VolumeGroupClass to which this volumeGroup belongs. + type: string + volumeGroupRef: + description: volumeGroupRef specifies the VolumeGroup object + to which this VolumeGroupContent object is bound. VolumeGroup.Spec.VolumeGroupContentName + field must reference to this VolumeGroupContent's name for + the bidirectional binding to be valid. + Required. + properties: + apiVersion: + description: API version of the referent. + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - source + - volumeGroupClassName + type: object + required: + - spec + type: object + served: true + storage: true \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_volumegroups.yaml b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_volumegroups.yaml new file mode 100644 index 000000000..862b4398a --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/crds/storage.hpe.com_volumegroups.yaml @@ -0,0 +1,69 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: volumegroups.storage.hpe.com +spec: + conversion: + strategy: None + group: storage.hpe.com + names: + kind: VolumeGroup + listKind: VolumeGroupList + plural: volumegroups + singular: volumegroup + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: VolumeGroup is a user's request for creating a volumegroup + properties: + apiVersion: + description: APIVersion defines the versioned schema of this representation + of an object. + type: string + kind: + description: 'Kind is a string value representing the REST resource + this object represents' + type: string + spec: + description: spec defines the desired characteristics of a volumeGroup + requested by a user. + Required. + properties: + volumeGroupClassName: + description: name of the volumeGroupClassName to create volumeGroups + type: string + persistentVolumeClaimNames: + description: persistentVolumeClaimNames are the name of the PVC associated with this volumeGroup. 
+ type: array + items: + type: string + volumeGroupContentName: + description: volumeGroupContentName is the name of the volumeGroupContent to which the volumeGroup is bound. + type: string + required: + - volumeGroupClassName + type: object + status: + description: status represents the current information of a volumeGroup. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time + volumeGroup is taken by the underlying storage system. + format: date-time + type: string + phase: + description: the state of the volumegroup + enum: + - Pending + - Ready + - Failed + type: string + type: object + required: + - spec + type: object + served: true + storage: true \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/files/config.json b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/files/config.json new file mode 100644 index 000000000..d00650184 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/files/config.json @@ -0,0 +1,128 @@ +[ + { + "category": "iscsi", + "severity": "warning", + "description": "Manual startup of iSCSI nodes on boot. Can be set in /etc/iscsi/iscsid.conf", + "parameter": "startup", + "recommendation": "manual" + }, + { + "category": "iscsi", + "severity": "warning", + "description": "Replacement_timeout of 10 seconds is recommended for faster failover of I/O by multipath on path failures. Can be set in /etc/iscsi/iscsid.conf", + "parameter": "replacement_timeout", + "recommendation": "10" + }, + { + "category": "iscsi", + "severity": "warning", + "description": "Minimum login timeout of 15 seconds is recommended with iSCSI. Can be set in /etc/iscsi/iscsid.conf", + "parameter": "login_timeout", + "recommendation": "15" + }, + { + "category": "iscsi", + "severity": "warning", + "description": "Minimum timeout of 10 seconds is recommended with noop requests. 
Can be set in /etc/iscsi/iscsid.conf", + "parameter": "noop_out_timeout", + "recommendation": "10" + }, + { + "category": "iscsi", + "severity": "info", + "description": "Minimum cmds_max of 512 is recommended for each session if handling multiple LUN's. Can be set in /etc/iscsi/iscsid.conf", + "parameter": "cmds_max", + "recommendation": "512" + }, + { + "category": "iscsi", + "severity": "warning", + "description": "Minimum queue_depth of 256 is recommended for each iSCSI session/path. Can be set in /etc/iscsi/iscsid.conf", + "parameter": "queue_depth", + "recommendation": "256" + }, + { + "category": "iscsi", + "severity": "info", + "description": "Minimum number of sessions per iSCSI login is recommended to be 1 by default. If additional sessions are needed this can be set in /etc/iscsi/iscsid.conf. If NCM is running, please change min_session_per_array in /etc/ncm.conf and restart nlt service instead", + "parameter": "nr_sessions", + "recommendation": "1" + }, + { + "category": "multipath", + "severity": "critical", + "description": "product attribute recommended to be set to Server in /etc/multipath.conf", + "parameter": "product", + "recommendation": "\"Server\"" + }, + { + "category": "multipath", + "severity": "critical", + "description": "alua prioritizer is recommended. Can be set in /etc/multipath.conf", + "parameter": "prio", + "recommendation": "alua" + }, + { + "category": "multipath", + "severity": "critical", + "description": "scsi_dh_alua device handler is recommended. Can be set in /etc/multipath.conf", + "parameter": "hardware_handler", + "recommendation": "\"1 alua\"" + }, + { + "category": "multipath", + "severity": "warning", + "description": "immediate failback setting is recommended. Can be set in /etc/multipath.conf", + "parameter": "failback", + "recommendation": "immediate" + }, + { + "category": "multipath", + "severity": "critical", + "description": "immediately fail i/o on transient path failures to retry on other paths, value=1. 
Can be set in /etc/multipath.conf", + "parameter": "fast_io_fail_tmo", + "recommendation": "5" + }, + { + "category": "multipath", + "severity": "critical", + "description": "queueing is recommended for 150 seconds, with no_path_retry value of 30. Can be set in /etc/multipath.conf", + "parameter": "no_path_retry", + "recommendation": "30" + }, + { + "category": "multipath", + "severity": "warning", + "description": "service-time path selector is recommended. Can be set in /etc/multipath.conf", + "parameter": "path_selector", + "recommendation": "\"service-time 0\"" + }, + { + "category": "multipath", + "severity": "critical", + "description": "vendor attribute recommended to be set to Nimble in /etc/multipath.conf", + "parameter": "vendor", + "recommendation": "\"Nimble\"" + }, + { + "category": "multipath", + "severity": "critical", + "description": "group paths according to ALUA path priority of active/standby. Recommended to be set to group_by_prio in /etc/multipath.conf", + "parameter": "path_grouping_policy", + "recommendation": "group_by_prio" + }, + { + "category": "multipath", + "severity": "critical", + "description": "tur path checker is recommended. Can be set in /etc/multipath.conf", + "parameter": "path_checker", + "recommendation": "tur" + }, + { + "category": "multipath", + "severity": "critical", + "description": "infinite value is recommended for timeout in cases of device loss for FC. 
Can be set in /etc/multipath.conf", + "parameter": "dev_loss_tmo", + "recommendation": "infinity" + } +] \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/questions.yml b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/questions.yml new file mode 100644 index 000000000..3c5cd2a8c --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/questions.yml @@ -0,0 +1,87 @@ +labels: + io.rancher.certified: partner +questions: +- variable: imagePullPolicy + label: "ImagePullPolicy" + default: "IfNotPresent" + type: enum + options: + - "IfNotPresent" + - "Always" + - "Never" + description: "ImagePullPolicy for all CSI driver images" + group: "HPE CSI Driver settings" +- variable: disableNodeConformance + label: "Disable automatic installation of iSCSI/Multipath Packages" + type: boolean + default: false + description: "Disable automatic installation of iSCSI/Multipath Packages" + group: "HPE CSI Driver settings" +- variable: iscsi.chapUser + label: "iSCSI CHAP Username" + type: string + required: false + description: "Specify username for iSCSI CHAP authentication" + group: "HPE iSCSI settings" +- variable: iscsi.chapPassword + label: "iSCSI CHAP Password" + type: password + min_length: 12 + max_length: 16 + required: false + description: "Specify password for iSCSI CHAP authentication" + group: "HPE iSCSI settings" +- variable: registry + label: "Registry" + type: string + default: "quay.io" + description: "Specify registry prefix (hostname[:port]) for CSI driver images" + group: "HPE CSI Driver settings" +- variable: disable.nimble + label: "Disable Nimble" + type: boolean + default: false + description: "Disable HPE Nimble Storage CSP Service" + group: "Disable Container Storage Providers" +- variable: disable.primera + label: "Disable Primera" + type: boolean + default: false + description: "Disable HPE Primera (and 3PAR) CSP Service" + group: "Disable Container Storage Providers" +- variable: disable.alletra6000 + label: "Disable 
Alletra 6000" + type: boolean + default: false + description: "Disable HPE Alletra 6000 CSP Service" + group: "Disable Container Storage Providers" +- variable: disable.alletra9000 + label: "Disable Alletra 9000" + type: boolean + default: false + description: "Disable HPE Alletra 9000 CSP Service" + group: "Disable Container Storage Providers" +- variable: disableNodeGetVolumeStats + label: "Disable NoteGetVolumeStats" + type: boolean + default: false + description: "Disable NodeGetVolumeStats call to CSI driver" + group: "HPE CSI Driver settings" +- variable: kubeletRootDir + label: "Set kubeletRootDir" + type: string + default: "/var/lib/kubelet" + description: "The kubelet root directory path" + group: "HPE CSI Driver settings" +- variable: logLevel + label: "Set log level" + default: "info" + type: enum + options: + - "info" + - "debug" + - "trace" + - "warn" + - "error" + description: "Sets the CSI driver and sidecar log level" + group: "HPE CSI Driver settings" diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/NOTES.txt b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/NOTES.txt new file mode 100644 index 000000000..e69de29bb diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/_helpers.tpl b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/_helpers.tpl new file mode 100644 index 000000000..165840d52 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/_helpers.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "hpe-csi-storage.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "hpe-csi-storage.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "hpe-csi-storage.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/csi-driver-crd.yaml b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/csi-driver-crd.yaml new file mode 100644 index 000000000..61275fffa --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/csi-driver-crd.yaml @@ -0,0 +1,24 @@ + + + +--- + +################# CSI Driver ########### +{{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "18") }} +apiVersion: storage.k8s.io/v1 +{{- else if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "14") }} +apiVersion: storage.k8s.io/v1beta1 +{{- end }} + +{{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "14") }} +kind: CSIDriver +metadata: + name: csi.hpe.com +spec: + podInfoOnMount: true + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "16") }} + volumeLifecycleModes: + - Persistent + - Ephemeral + {{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/hpe-csi-controller.yaml b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/hpe-csi-controller.yaml new file mode 100644 index 000000000..5fa269f69 --- 
/dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/hpe-csi-controller.yaml @@ -0,0 +1,240 @@ +--- + +############################################# +############ Controller driver ############ +############################################# + +kind: Deployment +apiVersion: apps/v1 +metadata: + name: hpe-csi-controller + namespace: {{ .Release.Namespace }} +spec: + replicas: 1 + selector: + matchLabels: + app: hpe-csi-controller + template: + metadata: + labels: + app: hpe-csi-controller + role: hpe-csi + spec: + serviceAccountName: hpe-csi-controller-sa + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "17") }} + priorityClassName: system-cluster-critical + {{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + dnsConfig: + options: + - name: ndots + value: "1" + containers: + - name: csi-provisioner + {{- if and (.Values.registry) (eq .Values.registry "quay.io") }} + image: k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0 + {{- else if .Values.registry }} + image: {{ .Values.registry }}/sig-storage/csi-provisioner:v3.1.0 + {{- else }} + image: k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0 + {{- end }} + args: + - "--csi-address=$(ADDRESS)" + - "--v=5" + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "13") }} + - "--timeout=30s" + - "--worker-threads=16" + {{- end }} + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy + - name: csi-attacher + {{- if and (.Values.registry) (eq .Values.registry "quay.io") }} + image: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0 + {{- else if .Values.registry }} + image: {{ .Values.registry }}/sig-storage/csi-attacher:v3.4.0 + {{- else }} + image: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0 + {{- end }} + args: + - "--v=5" + - 
"--csi-address=$(ADDRESS)" + {{- if and ( or (eq .Values.disable.primera false) (eq .Values.disable.alletra9000 false) ) ( or (eq .Values.disable.nimble true) (eq .Values.disable.alletra6000 true) ) }} + - "--timeout=180s" + {{- end }} + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy + - name: csi-snapshotter + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "20") }} + {{- if and (.Values.registry) (eq .Values.registry "quay.io") }} + image: k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1 + {{- else if .Values.registry }} + image: {{ .Values.registry }}/sig-storage/csi-snapshotter:v5.0.1 + {{- else }} + image: k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1 + {{- end }} + {{- else if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "17") }} + {{- if .Values.registry }} + image: {{ .Values.registry }}/k8scsi/csi-snapshotter:v3.0.3 + {{- else }} + image: quay.io/k8scsi/csi-snapshotter:v3.0.3 + {{- end }} + {{- end }} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "15") }} + - name: csi-resizer + {{- if and (.Values.registry) (eq .Values.registry "quay.io") }} + image: k8s.gcr.io/sig-storage/csi-resizer:v1.4.0 + {{- else if .Values.registry }} + image: {{ .Values.registry }}/sig-storage/csi-resizer:v1.4.0 + {{- else }} + image: k8s.gcr.io/sig-storage/csi-resizer:v1.4.0 + {{- end }} + args: + - "--csi-address=$(ADDRESS)" + - "--v=5" + env: + - name: ADDRESS + value: 
/var/lib/csi/sockets/pluginproxy/csi.sock + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy + {{- end }} + - name: hpe-csi-driver + {{- if .Values.registry }} + image: {{ .Values.registry }}/hpestorage/csi-driver:v2.1.1 + {{- else }} + image: quay.io/hpestorage/csi-driver:v2.1.1 + {{- end }} + args : + - "--endpoint=$(CSI_ENDPOINT)" + - "--flavor=kubernetes" + - "--pod-monitor" + - "--pod-monitor-interval=30" + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock + - name: LOG_LEVEL + value: {{ .Values.logLevel }} + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy + - name: log-dir + mountPath: /var/log + - name: k8s + mountPath: /etc/kubernetes + - name: hpeconfig + mountPath: /etc/hpe-storage + - name: root-dir + mountPath: /host + - name: csi-volume-mutator + {{- if .Values.registry }} + image: {{ .Values.registry }}/hpestorage/volume-mutator:v1.3.1 + {{- else }} + image: quay.io/hpestorage/volume-mutator:v1.3.1 + {{- end }} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi-extensions.sock + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-volume-group-snapshotter + {{- if .Values.registry }} + image: {{ .Values.registry }}/hpestorage/volume-group-snapshotter:v1.0.1 + {{- else }} + image: quay.io/hpestorage/volume-group-snapshotter:v1.0.1 + {{- end }} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi-extensions.sock + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-volume-group-provisioner + {{- if 
.Values.registry }} + image: {{ .Values.registry }}/hpestorage/volume-group-provisioner:v1.0.1 + {{- else }} + image: quay.io/hpestorage/volume-group-provisioner:v1.0.1 + {{- end }} + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/csi-extensions.sock + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + - name: csi-extensions + {{- if .Values.registry }} + image: {{ .Values.registry }}/hpestorage/csi-extensions:v1.2.1 + {{- else }} + image: quay.io/hpestorage/csi-extensions:v1.2.1 + {{- end }} + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + env: + - name: CSI_ENDPOINT + value: unix:///var/lib/csi/sockets/pluginproxy/csi-extensions.sock + - name: LOG_LEVEL + value: {{ .Values.logLevel }} + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + volumeMounts: + - name: socket-dir + mountPath: /var/lib/csi/sockets/pluginproxy/ + volumes: + - name: socket-dir + emptyDir: {} + - name: log-dir + hostPath: + path: /var/log + - name: k8s + hostPath: + path: /etc/kubernetes + - name: hpeconfig + hostPath: + path: /etc/hpe-storage + - name: root-dir + hostPath: + path: / + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 30 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 30 diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/hpe-csi-node.yaml b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/hpe-csi-node.yaml new file mode 100644 index 000000000..8becfd6f5 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/hpe-csi-node.yaml @@ -0,0 +1,201 @@ +--- + +####################################### +############ Node driver ############ +####################################### + +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: hpe-csi-node + namespace: {{ 
.Release.Namespace }} +spec: + selector: + matchLabels: + app: hpe-csi-node + template: + metadata: + labels: + app: hpe-csi-node + role: hpe-csi + spec: + serviceAccountName: hpe-csi-node-sa + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "17") }} + priorityClassName: system-node-critical + {{- end }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + dnsConfig: + options: + - name: ndots + value: "1" + containers: + - name: csi-node-driver-registrar + {{- if and (.Values.registry) (eq .Values.registry "quay.io") }} + image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.5.0 + {{- else if .Values.registry }} + image: {{ .Values.registry }}/sig-storage/csi-node-driver-registrar:v2.5.0 + {{- else }} + image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.5.0 + {{- end}} + args: + - "--csi-address=$(ADDRESS)" + - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)" + - "--v=5" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + {{- if .Values.kubeletRootDir }} + value: {{ .Values.kubeletRootDir }}/plugins/csi.hpe.com/csi.sock + {{- else }} + value: /var/lib/kubelet/plugins/csi.hpe.com/csi.sock + {{- end }} + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( eq ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "12") }} + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + {{- end }} + imagePullPolicy: "Always" + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: registration-dir + mountPath: /registration + - name: hpe-csi-driver + {{- if .Values.registry }} + image: {{ .Values.registry }}/hpestorage/csi-driver:v2.1.1 + {{- else }} + image: quay.io/hpestorage/csi-driver:v2.1.1 + {{- end}} + args : + - "--endpoint=$(CSI_ENDPOINT)" + - "--node-service" + - "--flavor=kubernetes" + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: LOG_LEVEL + value: {{ .Values.logLevel }} + - name: 
NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + {{ if and .Values.iscsi.chapUser .Values.iscsi.chapPassword }} + - name: CHAP_USER + value: {{ .Values.iscsi.chapUser }} + - name: CHAP_PASSWORD + value: {{ .Values.iscsi.chapPassword }} + {{- end }} + {{ if .Values.disableNodeConformance -}} + - name: DISABLE_NODE_CONFORMANCE + value: "true" + {{- end }} + {{- if .Values.kubeletRootDir }} + - name: KUBELET_ROOT_DIR + value: {{ .Values.kubeletRootDir }} + {{- end }} + {{ if .Values.disableNodeGetVolumeStats -}} + - name: DISABLE_NODE_GET_VOLUMESTATS + value: "true" + {{- end }} + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: pods-mount-dir + {{- if .Values.kubeletRootDir }} + mountPath: {{ .Values.kubeletRootDir }} + {{- else }} + mountPath: /var/lib/kubelet + {{- end }} + # needed so that any mounts setup inside this container are + # propagated back to the host machine. 
+ mountPropagation: "Bidirectional" + - name: root-dir + mountPath: /host + mountPropagation: "Bidirectional" + - name: device-dir + mountPath: /dev + - name: log-dir + mountPath: /var/log + - name: etc-hpe-storage-dir + mountPath: /etc/hpe-storage + - name: etc-kubernetes + mountPath: /etc/kubernetes + - name: sys + mountPath: /sys + - name: runsystemd + mountPath: /run/systemd + - name: etcsystemd + mountPath: /etc/systemd/system + - name: linux-config-file + mountPath: /opt/hpe-storage/nimbletune/config.json + subPath: config.json + volumes: + - name: registration-dir + hostPath: + {{ if .Values.kubeletRootDir }} + path: {{ .Values.kubeletRootDir }}/plugins_registry + {{- else }} + path: /var/lib/kubelet/plugins_registry + {{- end }} + type: Directory + - name: plugin-dir + hostPath: + {{ if .Values.kubeletRootDir }} + path: {{ .Values.kubeletRootDir }}/plugins/csi.hpe.com + {{- else }} + path: /var/lib/kubelet/plugins/csi.hpe.com + {{- end }} + type: DirectoryOrCreate + - name: pods-mount-dir + hostPath: + {{ if .Values.kubeletRootDir }} + path: {{ .Values.kubeletRootDir }} + {{- else }} + path: /var/lib/kubelet + {{- end }} + - name: root-dir + hostPath: + path: / + - name: device-dir + hostPath: + path: /dev + - name: log-dir + hostPath: + path: /var/log + - name: etc-hpe-storage-dir + hostPath: + path: /etc/hpe-storage + - name: etc-kubernetes + hostPath: + path: /etc/kubernetes + - name: runsystemd + hostPath: + path: /run/systemd + - name: etcsystemd + hostPath: + path: /etc/systemd/system + - name: sys + hostPath: + path: /sys + - name: linux-config-file + configMap: + name: hpe-linux-config + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 30 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 30 diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/hpe-csi-rbac.yaml 
b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/hpe-csi-rbac.yaml new file mode 100644 index 000000000..fd72c9956 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/hpe-csi-rbac.yaml @@ -0,0 +1,565 @@ +--- + +kind: ServiceAccount +apiVersion: v1 +metadata: + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-provisioner-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list", "create"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["serviceaccounts"] + verbs: ["get", "list", "create"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: [""] + resources: ["services"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["apps"] + resources: ["deployments"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] +{{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "17") }} + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] +{{- end }} + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "delete"] + - apiGroups: ["storage.k8s.io"] + resources: 
["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch", "delete"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-provisioner-binding +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: hpe-csi-provisioner-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-attacher-role +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["get", "list", "watch", "update", "create", "delete", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "watch", "list"] + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( eq ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "12") }} + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + {{- else if and (eq .Capabilities.KubeVersion.Major "1") ( eq ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "13") }} + - apiGroups: ["csi.storage.k8s.io"] + resources: ["csinodeinfos"] + verbs: ["get", "list", "watch"] + {{ else }} + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + {{- end }} + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-attacher-binding +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: hpe-csi-attacher-role + apiGroup: rbac.authorization.k8s.io + + +{{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" 
.Capabilities.KubeVersion.Minor ) "17") }} +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-snapshotter-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["create", "update", "delete", "get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "update", "delete", "get", "list", "watch", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-snapshotter-binding +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: hpe-csi-snapshotter-role + apiGroup: rbac.authorization.k8s.io + +{{- end }} + +{{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "15") }} +--- +# Resizer must be able to work with PVCs, PVs, SCs. 
+kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: external-resizer-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-resizer-role +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: external-resizer-role + apiGroup: rbac.authorization.k8s.io + +--- + +# Resizer must be able to work with end point in current namespace +# if (and only if) leadership election is enabled +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: {{ .Release.Namespace }} + name: external-resizer-cfg +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- + +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-resizer-role-cfg + namespace: {{ .Release.Namespace }} +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: Role + name: external-resizer-cfg + apiGroup: rbac.authorization.k8s.io + + +--- +# cluster role to support volumegroup +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-volumegroup-role +rules: + - apiGroups: ["storage.hpe.com"] + resources: ["volumegroups"] + verbs: ["get", "list", "watch", 
"create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["volumegroupcontents"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["volumegroupclasses"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["volumegroups/status"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["volumegroupcontents/status"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list", "create"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete", "get", "update"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-volumegroup-binding +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: hpe-csi-volumegroup-role + apiGroup: rbac.authorization.k8s.io + +--- +# cluster role to support snapshotgroup +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-snapshotgroup-role +rules: + - apiGroups: ["storage.hpe.com"] 
+ resources: ["snapshotgroups"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["snapshotgroupcontents"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["snapshotgroupclasses"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["snapshotgroups/status"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["snapshotgroupcontents/status"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: [""] + resources: ["namespaces"] + verbs: ["get", "list", "create"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete", "get", "update"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + - apiGroups: ["storage.hpe.com"] + resources: ["volumegroups"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.hpe.com"] + resources: ["volumegroupcontents"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.hpe.com"] + resources: ["volumegroupclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", 
"delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list"] + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-snapshotgroup-binding +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: hpe-csi-snapshotgroup-role + apiGroup: rbac.authorization.k8s.io + +--- +# mutator must be able to work with PVCs, PVs, SCs. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-mutator-role +rules: + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["update", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-mutator-binding +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + # replace with non-default namespace name + namespace: {{ .Release.Namespace }} + +roleRef: + kind: ClusterRole + name: csi-mutator-role + apiGroup: rbac.authorization.k8s.io + +--- +# mutator must be able to 
work with end point in current namespace +# if (and only if) leadership election is enabled +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: {{ .Release.Namespace }} + name: csi-mutator-cfg +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-mutator-role-cfg + namespace: {{ .Release.Namespace }} +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} + +roleRef: + kind: Role + name: csi-mutator-cfg + apiGroup: rbac.authorization.k8s.io +{{- end }} + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-driver-role +rules: + - apiGroups: ["storage.hpe.com"] + resources: ["hpenodeinfos"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["hpevolumeinfos"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["hpereplicationdeviceinfos"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["hpevolumegroupinfos"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: ["storage.hpe.com"] + resources: ["hpesnapshotgroupinfos"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["services"] + verbs: ["get"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list"] + - apiGroups: [""] + 
resources: ["namespaces"] + verbs: ["get", "list"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: hpe-csi-node-sa + namespace: {{ .Release.Namespace }} + +--- + +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: hpe-csi-driver-binding +subjects: + - kind: ServiceAccount + name: hpe-csi-controller-sa + namespace: {{ .Release.Namespace }} + - kind: ServiceAccount + name: hpe-csi-node-sa + namespace: {{ .Release.Namespace }} + - kind: ServiceAccount + name: hpe-csp-sa + namespace: {{ .Release.Namespace }} + +roleRef: + kind: ClusterRole + name: hpe-csi-driver-role + apiGroup: rbac.authorization.k8s.io + +--- + +kind: ServiceAccount +apiVersion: v1 +metadata: + name: hpe-csp-sa + namespace: {{ .Release.Namespace }} diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/hpe-linux-config.yaml b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/hpe-linux-config.yaml new file mode 100644 index 000000000..5e4c4944a --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/hpe-linux-config.yaml @@ -0,0 +1,13 @@ +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: hpe-linux-config + namespace: {{ .Release.Namespace }} +data: +{{ if and .Values.iscsi.chapUser .Values.iscsi.chapPassword }} + CHAP_USER: {{ .Values.iscsi.chapUser | quote }} + CHAP_PASSWORD: {{ .Values.iscsi.chapPassword | quote }} +{{- end }} + config.json: |- +{{ (.Files.Get "files/config.json") | indent 4 }} \ No newline at end of file diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/nimble-csp.yaml b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/nimble-csp.yaml new file mode 100644 index 000000000..094832790 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/nimble-csp.yaml @@ -0,0 +1,87 @@ +{{- if not .Values.disable.alletra6000 }} + +--- +### Alletra 6000 CSP Service ### +kind: 
Service +apiVersion: v1 +metadata: + name: alletra6000-csp-svc + namespace: {{ .Release.Namespace }} + labels: + app: alletra6000-csp-svc +spec: + ports: + - port: 8080 + protocol: TCP + selector: + app: nimble-csp +{{- end }} + +{{- if not .Values.disable.nimble }} +--- +### Nimble CSP Service ### +kind: Service +apiVersion: v1 +metadata: + name: nimble-csp-svc + namespace: {{ .Release.Namespace }} + labels: + app: nimble-csp-svc +spec: + ports: + - port: 8080 + protocol: TCP + selector: + app: nimble-csp +{{- end }} + + +{{- if or (not .Values.disable.alletra6000) (not .Values.disable.nimble) }} +--- +### CSP deployment ### +kind: Deployment +apiVersion: apps/v1 +metadata: + name: nimble-csp + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + app: nimble-csp + replicas: 1 + template: + metadata: + labels: + app: nimble-csp + spec: + serviceAccountName: hpe-csp-sa + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "17") }} + priorityClassName: system-cluster-critical + {{- end }} + containers: + - name: nimble-csp + {{- if .Values.registry }} + image: {{ .Values.registry }}/hpestorage/alletra-6000-and-nimble-csp:v2.1.1 + {{- else }} + image: quay.io/hpestorage/alletra-6000-and-nimble-csp:v2.1.1 + {{- end }} + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + ports: + - containerPort: 8080 + volumeMounts: + - name: log-dir + mountPath: /var/log + volumes: + - name: log-dir + hostPath: + path: /var/log + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + tolerationSeconds: 30 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 30 +{{- end }} diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/primera-3par-csp.yaml b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/primera-3par-csp.yaml new file mode 100644 index 000000000..2208aef56 --- /dev/null +++ 
b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/templates/primera-3par-csp.yaml @@ -0,0 +1,94 @@ +{{- if not .Values.disable.alletra9000 }} +--- +### Alletra9000 CSP Service ### +kind: Service +apiVersion: v1 +metadata: + name: alletra9000-csp-svc + namespace: {{ .Release.Namespace }} + labels: + app: alletra9000-csp-svc +spec: + ports: + - port: 8080 + protocol: TCP + selector: + app: primera3par-csp + +{{- end }} + +{{- if not .Values.disable.primera }} +--- +### Primera3par CSP Service ### +kind: Service +apiVersion: v1 +metadata: + name: primera3par-csp-svc + namespace: {{ .Release.Namespace }} + labels: + app: primera3par-csp-svc +spec: + ports: + - port: 8080 + protocol: TCP + selector: + app: primera3par-csp +{{- end }} + +{{- if or (not .Values.disable.alletra9000) (not .Values.disable.primera) }} + +--- +### CSP deployment ### +kind: Deployment +apiVersion: apps/v1 +metadata: + name: primera3par-csp + labels: + app: primera3par-csp + namespace: {{ .Release.Namespace }} +spec: + selector: + matchLabels: + app: primera3par-csp + replicas: 1 + template: + metadata: + labels: + app: primera3par-csp + spec: + serviceAccountName: hpe-csp-sa + {{- if and (eq .Capabilities.KubeVersion.Major "1") ( ge ( trimSuffix "+" .Capabilities.KubeVersion.Minor ) "17") }} + priorityClassName: system-cluster-critical + {{- end }} + containers: + - name: primera3par-csp + {{- if .Values.registry }} + image: {{ .Values.registry }}/hpestorage/alletra-9000-primera-and-3par-csp:v2.1.1 + {{- else }} + image: quay.io/hpestorage/alletra-9000-primera-and-3par-csp:v2.1.1 + {{- end }} + imagePullPolicy: {{ .Values.imagePullPolicy | quote }} + env: + - name: CRD_CLIENT_CONFIG_QPS + value: "35" + - name: CRD_CLIENT_CONFIG_BURST + value: "20" + ports: + - containerPort: 8080 + volumeMounts: + - name: log-dir + mountPath: /var/log + volumes: + - name: log-dir + hostPath: + path: /var/log + tolerations: + - effect: NoExecute + key: node.kubernetes.io/not-ready + operator: Exists + 
tolerationSeconds: 30 + - effect: NoExecute + key: node.kubernetes.io/unreachable + operator: Exists + tolerationSeconds: 30 +{{- end }} diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/values.schema.json b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/values.schema.json new file mode 100644 index 000000000..f4f291690 --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/values.schema.json @@ -0,0 +1,159 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema", + "$id": "http://example.com/example.json", + "title": "HPE CSI Driver for Kubernetes Helm Chart JSON Schema", + "type": "object", + "default": + { + "disable": { + "nimble": false, + "primera": false, + "alletra6000": false, + "alletra9000": false + }, + "disableNodeConformance": false, + "imagePullPolicy": "IfNotPresent", + "iscsi": { + "chapUser": "", + "chapPassword": "" + }, + "logLevel": "info", + "registry": "quay.io", + "kubeletRootDir": "/var/lib/kubelet/", + "disableNodeGetVolumeStats": false + }, + "required": [ + "disable", + "disableNodeConformance", + "imagePullPolicy", + "iscsi", + "logLevel", + "registry", + "kubeletRootDir", + "disableNodeGetVolumeStats" + ], + "properties": { + "disable": { + "$id": "#/properties/disable", + "title": "CSP Deployment and Service backend exclusion", + "description": "All backend Deployments and Services are installed by default.", + "type": "object", + "default": + { + "nimble": false, + "primera": false, + "alletra6000": false, + "alletra9000": false + }, + "required": [ + "nimble", + "primera", + "alletra6000", + "alletra9000" + ], + "properties": { + "nimble": { + "$id": "#/properties/disable/properties/nimble", + "title": "HPE Nimble Storage", + "type": "boolean", + "default": false + }, + "primera": { + "$id": "#/properties/disable/properties/primera", + "title": "HPE Primera", + "type": "boolean", + "default": false + }, + "alletra6000": { + "$id": "#/properties/disable/properties/alletra6000", + "title": "HPE Alletra 6000", + "type": 
"boolean", + "default": false + }, + "alletra9000": { + "$id": "#/properties/disable/properties/alletra9000", + "title": "HPE Alletra 9000", + "type": "boolean", + "default": false + } + }, + "additionalProperties": false + }, + "disableNodeConformance": { + "$id": "#/properties/disableNodeConformance", + "title": "Disable node conformance", + "description": "Disabling node conformance forces the cluster administrator to install required packages and ensure the correct node services are started to use external block storage.", + "type": "boolean", + "default": false + }, + "imagePullPolicy": { + "$id": "#/properties/imagePullPolicy", + "title": "CSI driver image pull policy", + "type": "string", + "default": "IfNotPresent", + "enum": [ "Always", "IfNotPresent", "Never" ] + }, + "iscsi": { + "$id": "#/properties/iscsi", + "title": "iSCSI CHAP credentials", + "type": "object", + "default": + { + "chapUser": "", + "chapPassword": "" + }, + "required": [ + "chapUser", + "chapPassword" + ], + "properties": { + "chapUser": { + "$id": "#/properties/iscsi/properties/chapUser", + "title": "CHAP username", + "type": "string", + "default": "" + }, + "chapPassword": { + "$id": "#/properties/iscsi/properties/chapPassword", + "title": "CHAP password", + "description": "Between 12 and 16 characters", + "type": "string", + "default": "", + "pattern": "^$|^[a-zA-Z0-9+_)(*^%$#@!]{12,16}$" + } + }, + "additionalProperties": false + }, + "logLevel": { + "$id": "#/properties/logLevel", + "title": "Set the log level of the HPE CSI Driver images", + "type": "string", + "default": "info", + "enum": [ "info", "debug", "trace", "warn", "error" ] + }, + "registry": { + "$id": "#/properties/registry", + "title": "Pull images from a different registry than default", + "description": "SIG Storage images needs to be mirrored from k8s.gcr.io to this registry if this parameter is changed.", + "type": "string", + "default": "quay.io" + }, + "kubeletRootDir": { + "$id": 
"#/properties/kubeletRootDir", + "title": "Kubelet root directory", + "description": "Only change this if the kubelet root dir has been altered by the Kubernetes platform installer.", + "type": "string", + "default": "/var/lib/kubelet", + "pattern": "^/" + }, + "disableNodeGetVolumeStats": { + "$id": "#/properties/disableNodeGetVolumeStats", + "title": "Disable the CSI nodeGetVolumeStats call", + "description": "In very large environments, disabling this feature may alleviate pressure on the CSP.", + "type": "boolean", + "default": false + }, + "global": {} + }, + "additionalProperties": false +} diff --git a/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/values.yaml b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/values.yaml new file mode 100644 index 000000000..b81cefa3f --- /dev/null +++ b/charts/hpe-csi-driver/hpe-csi-driver/2.1.1/values.yaml @@ -0,0 +1,34 @@ +# Default values for hpe-csi-driver Helm chart +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +# Control CSP Service and Deployments for HPE storage products +disable: + nimble: false + primera: false + alletra6000: false + alletra9000: false + +# For controlling automatic iscsi/multipath package installation +disableNodeConformance: false + +# imagePullPolicy applied for all hpe-csi-driver images +imagePullPolicy: "IfNotPresent" + +# Cluster wide values for CHAP authentication +iscsi: + chapUser: "" + chapPassword: "" + +# Log level for all hpe-csi-driver components +logLevel: "info" + +# Registry prefix for hpe-csi-driver images +registry: "quay.io" + +# Kubelet root directory path +kubeletRootDir: "/var/lib/kubelet/" + +# NodeGetVolumestats will be called by default, set true to disable the call +disableNodeGetVolumeStats: false + diff --git a/charts/k10/k10/4.5.1400/Chart.yaml b/charts/k10/k10/4.5.1400/Chart.yaml new file mode 100644 index 000000000..816a4b0f7 --- /dev/null +++ b/charts/k10/k10/4.5.1400/Chart.yaml @@ -0,0 +1,15 @@ +annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: K10 + catalog.cattle.io/release-name: k10 +apiVersion: v2 +appVersion: 4.5.14 +description: Kasten’s K10 Data Management Platform +home: https://kasten.io/ +icon: https://docs.kasten.io/_static/kasten-logo-vertical.png +kubeVersion: '>= 1.17.0-0' +maintainers: +- email: support@kasten.io + name: kastenIO +name: k10 +version: 4.5.1400 diff --git a/charts/k10/k10/4.5.1400/README.md b/charts/k10/k10/4.5.1400/README.md new file mode 100644 index 000000000..6000d693a --- /dev/null +++ b/charts/k10/k10/4.5.1400/README.md @@ -0,0 +1,227 @@ +# Kasten's K10 Helm chart. + +[Kasten's k10](https://docs.kasten.io/) is a data lifecycle management system for all your persistence-enabled container-based applications. 
+ +## TL;DR; + +```console +$ helm install kasten/k10 --name=k10 --namespace=kasten-io +``` + +## Introduction + +This chart bootstraps Kasten's K10 platform on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + - Kubernetes 1.7+ with Beta APIs enabled + +## Installing the Chart + +To install the chart on a [GKE](https://cloud.google.com/container-engine/) cluster + +```console +$ helm install kasten/k10 --name=k10 --namespace=kasten-io +``` + +To install the chart on an [AWS](https://aws.amazon.com/) [kops](https://github.com/kubernetes/kops)-created cluster + +```console +$ helm install kasten/k10 --name=k10 --namespace=kasten-io --set secrets.awsAccessKeyId="${AWS_ACCESS_KEY_ID}" \ + --set secrets.awsSecretAccessKey="${AWS_SECRET_ACCESS_KEY}" +``` + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `k10` application: + +```console +$ helm delete k10 --purge +``` + +## Configuration + +The following table lists the configurable parameters of the K10 +chart and their default values. + +Parameter | Description | Default +--- | --- | --- +`eula.accept`| Whether to accept the EULA before installation | `false` +`eula.company` | Company name. Required field if EULA is accepted | `None` +`eula.email` | Contact email. 
Required field if EULA is accepted | `None` +`license` | License string obtained from Kasten | `None` +`rbac.create` | Whether to enable RBAC with a specific cluster role and binding for K10 | `true` +`scc.create` | Whether to create a SecurityContextConstraints for K10 ServiceAccounts | `false` +`services.dashboardbff.hostNetwork` | Whether the dashboardbff pods may use the node network | `false` +`services.executor.hostNetwork` | Whether the executor pods may use the node network | `false` +`services.aggregatedapis.hostNetwork` | Whether the aggregatedapis pods may use the node network | `false` +`serviceAccount.create`| Specifies whether a ServiceAccount should be created | `true` +`serviceAccount.name` | The name of the ServiceAccount to use. If not set, a name is derived using the release and chart names. | `None` +`ingress.create` | Specifies whether the K10 dashboard should be exposed via ingress | `false` +`ingress.class` | Cluster ingress controller class: `nginx`, `GCE` | `None` +`ingress.host` | FQDN (e.g., `k10.example.com`) for name-based virtual host | `None` +`ingress.urlPath` | URL path for K10 Dashboard (e.g., `/k10`) | `Release.Name` +`ingress.annotations` | Additional Ingress object annotations | `{}` +`ingress.tls.enabled` | Configures a TLS use for `ingress.host` | `false` +`ingress.tls.secretName` | Specifies a name of TLS secret | `None` +`ingress.pathType` | Specifies the path type for the ingress resource | `ImplementationSpecific` +`global.persistence.enabled` | Use PVS to persist data | `true` +`global.persistence.size` | Default global size of volumes for K10 persistent services | `20Gi` +`global.persistence.catalog.size` | Size of a volume for catalog service | `global.persistence.size` +`global.persistence.jobs.size` | Size of a volume for jobs service | `global.persistence.size` +`global.persistence.logging.size` | Size of a volume for logging service | `global.persistence.size` +`global.persistence.metering.size` | Size of a volume 
for metering service | `global.persistence.size` +`global.persistence.storageClass` | Specified StorageClassName will be used for PVCs | `None` +`global.airgapped.repository` | Specify the helm repository for offline (airgapped) installation | `''` +`global.imagePullSecret` | Provide secret which contains docker config for private repository. Use `k10-ecr` when secrets.dockerConfigPath is used. | `''` +`secrets.awsAccessKeyId` | AWS access key ID (required for AWS deployment) | `None` +`secrets.awsSecretAccessKey` | AWS access key secret | `None` +`secrets.awsIamRole` | ARN of the AWS IAM role assumed by K10 to perform any AWS operation. | `None` +`secrets.googleApiKey` | Non-default base64 encoded GCP Service Account key file | `None` +`secrets.azureTenantId` | Azure tenant ID (required for Azure deployment) | `None` +`secrets.azureClientId` | Azure Service App ID | `None` +`secrets.azureClientSecret` | Azure Service APP secret | `None` +`secrets.azureResourceGroup` | Resource Group name that was created for the Kubernetes cluster | `None` +`secrets.azureSubscriptionID` | Subscription ID in your Azure tenant | `None` +`secrets.azureResourceMgrEndpoint` | Resource management endpoint for the Azure Stack instance | `None` +`secrets.azureADEndpoint` | Azure Active Directory login endpoint | `None` +`secrets.azureADResourceID` | Azure Active Directory resource ID to obtain AD tokens | `None` +`secrets.azureCloudEnvID` | Azure Cloud Environment ID | `None` +`secrets.vsphereEndpoint` | vSphere endpoint for login | `None` +`secrets.vsphereUsername` | vSphere username for login | `None` +`secrets.vspherePassword` | vSphere password for login | `None` +`secrets.dockerConfigPath` | Use --set-file secrets.dockerConfigPath=path_to_docker_config.yaml to specify docker config for image pull | `None` +`cacertconfigmap.name` | Name of the ConfigMap that contains a certificate for a trusted root certificate authority | `None` +`clusterName` | Cluster name for better logs 
visibility | `None` +`metering.awsRegion` | Sets AWS_REGION for metering service | `None` +`metering.mode` | Control license reporting (set to `airgap` for private-network installs) | `None` +`metering.reportCollectionPeriod` | Sets metric report collection period (in seconds) | `1800` +`metering.reportPushPeriod` | Sets metric report push period (in seconds) | `3600` +`metering.promoID` | Sets K10 promotion ID from marketing campaigns | `None` +`metering.awsMarketplace` | Sets AWS cloud metering license mode | `false` +`metering.awsManagedLicense` | Sets AWS managed license mode | `false` +`metering.redhatMarketplacePayg` | Sets Red Hat cloud metering license mode | `false` +`metering.licenseConfigSecretName` | Sets AWS managed license config secret | `None` +`externalGateway.create` | Configures an external gateway for K10 API services | `false` +`externalGateway.annotations` | Standard annotations for the services | `None` +`externalGateway.fqdn.name` | Domain name for the K10 API services | `None` +`externalGateway.fqdn.type` | Supported gateway type: `route53-mapper` or `external-dns` | `None` +`externalGateway.awsSSLCertARN` | ARN for the AWS ACM SSL certificate used in the K10 API server | `None` +`auth.basicAuth.enabled` | Configures basic authentication for the K10 dashboard | `false` +`auth.basicAuth.htpasswd` | A username and password pair separated by a colon character | `None` +`auth.basicAuth.secretName` | Name of an existing Secret that contains a file generated with htpasswd | `None` +`auth.k10AdminGroups` | A list of groups whose members are granted admin level access to K10's dashboard | `None` +`auth.k10AdminUsers` | A list of users who are granted admin level access to K10's dashboard | `None` +`auth.tokenAuth.enabled` | Configures token based authentication for the K10 dashboard | `false` +`auth.oidcAuth.enabled` | Configures Open ID Connect based authentication for the K10 dashboard | `false` +`auth.oidcAuth.providerURL` | URL for the OIDC 
Provider | `None` +`auth.oidcAuth.redirectURL` | URL to the K10 gateway service | `None` +`auth.oidcAuth.scopes` | Space separated OIDC scopes required for userinfo. Example: "profile email" | `None` +`auth.oidcAuth.prompt` | The type of prompt to be used during authentication (none, consent, login or select_account) | `select_account` +`auth.oidcAuth.clientID` | Client ID given by the OIDC provider for K10 | `None` +`auth.oidcAuth.clientSecret` | Client secret given by the OIDC provider for K10 | `None` +`auth.oidcAuth.usernameClaim` | The claim to be used as the username | `sub` +`auth.oidcAuth.usernamePrefix` | Prefix that has to be used with the username obtained from the username claim | `None` +`auth.oidcAuth.groupClaim` | Name of a custom OpenID Connect claim for specifying user groups | `None` +`auth.oidcAuth.groupPrefix` | All groups will be prefixed with this value to prevent conflicts | `None` +`auth.openshift.enabled` | Enables access to the K10 dashboard by authenticating with the OpenShift OAuth server | `false` +`auth.openshift.serviceAccount` | Name of the service account that represents an OAuth client | `None` +`auth.openshift.clientSecret` | The token corresponding to the service account | `None` +`auth.openshift.dashboardURL` | The URL used for accessing K10's dashboard | `None` +`auth.openshift.openshiftURL` | The URL for accessing OpenShift's API server | `None` +`auth.openshift.insecureCA` | To turn off SSL verification of connections to OpenShift | `false` +`auth.openshift.useServiceAccountCA` | Set this to true to use the CA certificate corresponding to the Service Account ``auth.openshift.serviceAccount`` usually found at ``/var/run/secrets/kubernetes.io/serviceaccount/ca.crt`` | `false` +`auth.ldap.enabled` | Configures Active Directory/LDAP based authentication for the K10 dashboard | `false` +`auth.ldap.restartPod` | To force a restart of the authentication service pod (useful when updating authentication config) | `false` 
+`auth.ldap.dashboardURL` | The URL used for accessing K10's dashboard | `None` +`auth.ldap.host` | Host and optional port of the AD/LDAP server in the form `host:port` | `None` +`auth.ldap.insecureNoSSL` | Required if the AD/LDAP host is not using TLS | `false` +`auth.ldap.insecureSkipVerifySSL` | To turn off SSL verification of connections to the AD/LDAP host | `false` +`auth.ldap.startTLS` | When set to true, ldap:// is used to connect to the server followed by creation of a TLS session. When set to false, ldaps:// is used. | `false` +`auth.ldap.bindDN` | The Distinguished Name(username) used for connecting to the AD/LDAP host | `None` +`auth.ldap.bindPW` | The password corresponding to the `bindDN` for connecting to the AD/LDAP host | `None` +`auth.ldap.bindPWSecretName` | The name of the secret that contains the password corresponding to the `bindDN` for connecting to the AD/LDAP host | `None` +`auth.ldap.userSearch.baseDN` | The base Distinguished Name to start the AD/LDAP search from | `None` +`auth.ldap.userSearch.filter` | Optional filter to apply when searching the directory | `None` +`auth.ldap.userSearch.username` | Attribute used for comparing user entries when searching the directory | `None` +`auth.ldap.userSearch.idAttr` | AD/LDAP attribute in a user's entry that should map to the user ID field in a token | `None` +`auth.ldap.userSearch.emailAttr` | AD/LDAP attribute in a user's entry that should map to the email field in a token | `None` +`auth.ldap.userSearch.nameAttr` | AD/LDAP attribute in a user's entry that should map to the name field in a token | `None` +`auth.ldap.userSearch.preferredUsernameAttr` | AD/LDAP attribute in a user's entry that should map to the preferred_username field in a token | `None` +`auth.ldap.groupSearch.baseDN` | The base Distinguished Name to start the AD/LDAP group search from | `None` +`auth.ldap.groupSearch.filter` | Optional filter to apply when searching the directory for groups | `None` 
+`auth.ldap.groupSearch.nameAttr` | The AD/LDAP attribute that represents a group's name in the directory | `None` +`auth.ldap.groupSearch.userMatchers` | List of field pairs that are used to match a user to a group. | `None` +`auth.ldap.groupSearch.userMatchers.userAttr` | Attribute in the user's entry that must match with the `groupAttr` while searching for groups | `None` +`auth.ldap.groupSearch.userMatchers.groupAttr` | Attribute in the group's entry that must match with the `userAttr` while searching for groups | `None` +`auth.groupAllowList` | A list of groups whose members are allowed access to K10's dashboard | `None` +`services.securityContext` | Custom [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) for K10 service containers | `{"runAsUser" : 1000, "fsGroup": 1000}` +`services.securityContext.runAsUser` | User ID K10 service containers run as| `1000` +`services.securityContext.runAsGroup` | Group ID K10 service containers run as| `1000` +`services.securityContext.fsGroup` | FSGroup that owns K10 service container volumes | `1000` +`injectKanisterSidecar.enabled` | Enable Kanister sidecar injection for workload pods | `false` +`injectKanisterSidecar.namespaceSelector.matchLabels` | Set of labels to select namespaces in which sidecar injection is enabled for workloads | `{}` +`injectKanisterSidecar.objectSelector.matchLabels` | Set of labels to filter workload objects in which the sidecar is injected | `{}` +`injectKanisterSidecar.webhookServer.port` | Port number on which the mutating webhook server accepts request | `8080` +`gateway.insecureDisableSSLVerify` | Specifies whether to disable SSL verification for gateway pods | `false` +`gateway.exposeAdminPort` | Specifies whether to expose Admin port for gateway service | `true` +`genericVolumeSnapshot.resources.[requests\|limits].[cpu\|memory]` | Resource requests and limits for Generic Volume Snapshot restore pods | `{}` +`prometheus.server.enabled` | If 
false, K10's Prometheus server will not be created, reducing the dashboard's functionality. | `true` +`prometheus.server.persistentVolume.enabled` | If true, K10 Prometheus server will create a Persistent Volume Claim | `true` +`prometheus.server.persistentVolume.size` | K10 Prometheus server data Persistent Volume size | `30Gi` +`prometheus.server.persistentVolume.storageClass` | StorageClassName used to create Prometheus PVC. Setting this option overwrites global StorageClass value | `""` +`prometheus.server.retention` | (optional) K10 Prometheus data retention | `"30d"` +`prometheus.server.baseURL` | (optional) K10 Prometheus external url path at which the server can be accessed | `/k10/prometheus/` +`prometheus.server.prefixURL` | (optional) K10 Prometheus prefix slug at which the server can be accessed | `/k10/prometheus/` +`grafana.enabled` | (optional) If false Grafana will not be available | `true` +`grafana.prometheusPrefixURL` | (optional) URL for Prometheus datasource in Grafana (must match `prometheus.server.prefixURL`) | `/k10/prometheus/` +`resources...[requests\|limits].[cpu\|memory]` | Overwrite default K10 [container resource requests and limits](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/) | varies by container +`route.enabled` | Specifies whether the K10 dashboard should be exposed via route | `false` +`route.host` | FQDN (e.g., `.k10.example.com`) for name-based virtual host | `""` +`route.path` | URL path for K10 Dashboard (e.g., `/k10`) | `/` +`route.annotations` | Additional Route object annotations | `{}` +`route.labels` | Additional Route object labels | `{}` +`route.tls.enabled` | Configures a TLS use for `route.host` | `false` +`route.tls.insecureEdgeTerminationPolicy` | Specifies behavior for insecure scheme traffic | `Redirect` +`route.tls.termination` | Specifies the TLS termination of the route | `edge` +`apigateway.serviceResolver` | Specifies the resolver used for service discovery in the API 
gateway (`dns` or `endpoint`) | `dns` +`limiter.genericVolumeSnapshots` | Limit of concurrent generic volume snapshot create operations | `10` +`limiter.genericVolumeCopies` | Limit of concurrent generic volume snapshot copy operations | `10` +`limiter.genericVolumeRestores` | Limit of concurrent generic volume snapshot restore operations | `10` +`limiter.csiSnapshots` | Limit of concurrent CSI snapshot create operations | `10` +`limiter.providerSnapshots` | Limit of concurrent cloud provider create operations | `10` +`cluster.domainName` | Specifies the domain name of the cluster | `cluster.local` +`kanister.backupTimeout` | Specifies timeout to set on Kanister backup operations | `45` +`kanister.restoreTimeout` | Specifies timeout to set on Kanister restore operations | `600` +`kanister.deleteTimeout` | Specifies timeout to set on Kanister delete operations | `45` +`kanister.hookTimeout` | Specifies timeout to set on Kanister pre-hook and post-hook operations | `20` +`kanister.checkRepoTimeout` | Specifies timeout to set on Kanister checkRepo operations | `20` +`kanister.statsTimeout` | Specifies timeout to set on Kanister stats operations | `20` +`kanister.efsPostRestoreTimeout` | Specifies timeout to set on Kanister efsPostRestore operations | `45` +`awsConfig.assumeRoleDuration` | Duration of a session token generated by AWS for an IAM role. The minimum value is 15 minutes and the maximum value is the maximum duration setting for that IAM role. For documentation about how to view and edit the maximum session duration for an IAM role see https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session. 
The value accepts a number along with a single character ``m``(for minutes) or ``h`` (for hours) Examples: 60m or 2h | `''` +`awsConfig.efsBackupVaultName` | Specifies the AWS EFS backup vault name | `k10vault` +`vmWare.taskTimeoutMin` | Specifies the timeout for VMWare operations | `60` +`encryption.primaryKey.awsCmkKeyId` | Specifies the AWS CMK key ID for encrypting K10 Primary Key | `None` +## Helm tips and tricks + +There is a way of setting values via a yaml file instead of using `--set`. +You can copy/paste values into a file (e.g., my_values.yaml): + +```yaml +secrets: + awsAccessKeyId: ${AWS_ACCESS_KEY_ID} + awsSecretAccessKey: ${AWS_SECRET_ACCESS_KEY} +``` +and then run: +```bash + envsubst < my_values.yaml > my_values_out.yaml && helm install helm/k10 -f my_values_out.yaml +``` + +To use non-default GCP ServiceAccount (SA) credentials, the credentials JSON file needs to be encoded into a base64 string. + + +```bash + sa_key=$(base64 -w0 sa-key.json) + helm install kasten/k10 --name=k10 --namespace=kasten-io --set secrets.googleApiKey=$sa_key +``` diff --git a/charts/k10/k10/4.5.1400/app-readme.md b/charts/k10/k10/4.5.1400/app-readme.md new file mode 100644 index 000000000..1b221891b --- /dev/null +++ b/charts/k10/k10/4.5.1400/app-readme.md @@ -0,0 +1,5 @@ +The K10 data management platform, purpose-built for Kubernetes, provides enterprise operations teams an easy-to-use, scalable, and secure system for backup/restore, disaster recovery, and mobility of Kubernetes applications. + +K10’s application-centric approach and deep integrations with relational and NoSQL databases, Kubernetes distributions, and all clouds provide teams the freedom of infrastructure choice without sacrificing operational simplicity. Policy-driven and extensible, K10 provides a native Kubernetes API and includes features such as full-spectrum consistency, database integrations, automatic application discovery, multi-cloud mobility, and a powerful web-based user interface. 
+ +For more information, refer to the docs [https://docs.kasten.io/](https://docs.kasten.io/) diff --git a/charts/k10/k10/4.5.1400/charts/grafana/.helmignore b/charts/k10/k10/4.5.1400/charts/grafana/.helmignore new file mode 100644 index 000000000..8cade1318 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.vscode +.project +.idea/ +*.tmproj +OWNERS diff --git a/charts/k10/k10/4.5.1400/charts/grafana/Chart.yaml b/charts/k10/k10/4.5.1400/charts/grafana/Chart.yaml new file mode 100644 index 000000000..e2e2ba77a --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/Chart.yaml @@ -0,0 +1,22 @@ +apiVersion: v2 +appVersion: 8.1.0 +description: The leading tool for querying and visualizing time series and metrics. 
+home: https://grafana.net +icon: https://raw.githubusercontent.com/grafana/grafana/master/public/img/logo_transparent_400x.png +kubeVersion: ^1.8.0-0 +maintainers: +- email: zanhsieh@gmail.com + name: zanhsieh +- email: rluckie@cisco.com + name: rtluckie +- email: maor.friedman@redhat.com + name: maorfr +- email: miroslav.hadzhiev@gmail.com + name: Xtigyro +- email: mail@torstenwalter.de + name: torstenwalter +name: grafana +sources: +- https://github.com/grafana/grafana +type: application +version: 6.15.0 diff --git a/charts/k10/k10/4.5.1400/charts/grafana/README.md b/charts/k10/k10/4.5.1400/charts/grafana/README.md new file mode 100644 index 000000000..01219f7cb --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/README.md @@ -0,0 +1,528 @@ +# Grafana Helm Chart + +* Installs the web dashboarding system [Grafana](http://grafana.org/) + +## Get Repo Info + +```console +helm repo add grafana https://grafana.github.io/helm-charts +helm repo update +``` + +_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release grafana/grafana +``` + +## Uninstalling the Chart + +To uninstall/delete the my-release deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 4.0.0 (And 3.12.1) + +This version requires Helm >= 2.12.0. + +### To 5.0.0 + +You have to add --force to your helm upgrade command as the labels of the chart have changed. + +### To 6.0.0 + +This version requires Helm >= 3.1.0. 
+ +## Configuration + +| Parameter | Description | Default | +|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------| +| `replicas` | Number of nodes | `1` | +| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` | +| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` | +| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` | +| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` | +| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`| +| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "runAsGroup": 472, "fsGroup": 472}` | +| `priorityClassName` | Name of Priority Class to assign pods | `nil` | +| `image.repository` | Image repository | `grafana/grafana` | +| `image.tag` | Image tag (`Must be >= 5.0.0`) | `8.0.3` | +| `image.sha` | Image sha (optional) | `80c6d6ac633ba5ab3f722976fb1d9a138f87ca6a9934fcd26a5fc28cbde7dbfa` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Image pull secrets | `{}` | +| `service.enabled` | Enable grafana service | `true` | +| `service.type` | Kubernetes service type | `ClusterIP` | +| `service.port` | Kubernetes port where service is exposed | `80` | +| `service.portName` | Name of the port on the service | `service` | +| `service.targetPort` | Internal service is port | `3000` | +| `service.nodePort` | Kubernetes service nodePort | `nil` | +| `service.annotations` | Service annotations | `{}` | +| `service.labels` | Custom labels | `{}` | +| `service.clusterIP` | internal cluster service IP | `nil` | +| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `nil` | +| `service.loadBalancerSourceRanges` | 
list of IP CIDRs allowed access to lb (if supported) | `[]` | +| `service.externalIPs` | service external IP addresses | `[]` | +| `extraExposePorts` | Additional service ports for sidecar containers| `[]` | +| `hostAliases` | adds rules to the pod's /etc/hosts | `[]` | +| `ingress.enabled` | Enables Ingress | `false` | +| `ingress.annotations` | Ingress annotations (values are templated) | `{}` | +| `ingress.labels` | Custom labels | `{}` | +| `ingress.path` | Ingress accepted path | `/` | +| `ingress.pathType` | Ingress type of path | `Prefix` | +| `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` | +| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-alb-ingress-controller/guide/ingress/annotation/#actions). Requires `ingress.hosts` to have one or more host entries. | `[]` | +| `ingress.tls` | Ingress TLS configuration | `[]` | +| `resources` | CPU/Memory resource requests/limits | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Toleration labels for pod assignment | `[]` | +| `affinity` | Affinity settings for pod assignment | `{}` | +| `extraInitContainers` | Init containers to add to the grafana pod | `{}` | +| `extraContainers` | Sidecar containers to add to the grafana pod | `{}` | +| `extraContainerVolumes` | Volumes that can be mounted in sidecar containers | `[]` | +| `extraLabels` | Custom labels for all manifests | `{}` | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `global.persistence.enabled` | Use persistent volume to store data | `false` | +| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `pvc` | +| `global.persistence.size` | Size of persistent volume claim | `20Gi` | +| `persistence.existingClaim` | Use an existing PVC to persist data | `nil` | +| `global.persistence.storageClass` | Type 
of persistent volume claim | `nil` | +| `global.persistence.accessMode` | Persistence access modes | `[ReadWriteOnce]` | +| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` | +| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` | +| `persistence.subPath` | Mount a sub dir of the persistent volume | `nil` | +| `persistence.inMemory.enabled` | If persistence is not enabled, whether to mount the local storage in-memory to improve performance | `false` | +| `persistence.inMemory.sizeLimit` | SizeLimit for the in-memory local storage | `nil` | +| `initChownData.enabled` | If false, don't reset data ownership at startup | true | +| `initChownData.image.repository` | init-chown-data container image repository | `busybox` | +| `initChownData.image.tag` | init-chown-data container image tag | `1.31.1` | +| `initChownData.image.sha` | init-chown-data container image sha (optional)| `""` | +| `initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` | +| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` | +| `schedulerName` | Alternate scheduler name | `nil` | +| `env` | Extra environment variables passed to pods | `{}` | +| `envValueFrom` | Environment variables from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. | `{}` | +| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` | +| `envRenderSecret` | Sensible environment variables passed to pods and stored as secret | `{}` | +| `enableServiceLinks` | Inject Kubernetes services as environment variables. 
| `true` | +| `extraSecretMounts` | Additional grafana server secret mounts | `[]` | +| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` | +| `extraConfigmapMounts` | Additional grafana server configMap volume mounts | `[]` | +| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` | +| `plugins` | Plugins to be loaded along with Grafana | `[]` | +| `datasources` | Configure grafana datasources (passed through tpl) | `{}` | +| `notifiers` | Configure grafana notifiers | `{}` | +| `dashboardProviders` | Configure grafana dashboard providers | `{}` | +| `dashboards` | Dashboards to import | `{}` | +| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` | +| `grafana.ini` | Grafana's primary configuration | `{}` | +| `ldap.enabled` | Enable LDAP authentication | `false` | +| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. | `""` | +| `ldap.config` | Grafana's LDAP configuration | `""` | +| `annotations` | Deployment annotations | `{}` | +| `labels` | Deployment labels | `{}` | +| `podAnnotations` | Pod annotations | `{}` | +| `podLabels` | Pod labels | `{}` | +| `podPortName` | Name of the grafana port on the pod | `grafana` | +| `sidecar.image.repository` | Sidecar image repository | `quay.io/kiwigrid/k8s-sidecar` | +| `sidecar.image.tag` | Sidecar image tag | `1.12.2` | +| `sidecar.image.sha` | Sidecar image sha (optional) | `""` | +| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` | +| `sidecar.resources` | Sidecar resources | `{}` | +| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable | `false` | +| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` | +| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` | +| 
`sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` | +| `sidecar.dashboards.provider.orgid` | Id of the organisation, to which the dashboards should be added | `1` | +| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` | +| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` | +| `sidecar.dashboards.provider.allowUiUpdates` | Allow updating provisioned dashboards from the UI | `false` | +| `sidecar.dashboards.provider.type` | Provider type | `file` | +| `sidecar.dashboards.provider.foldersFromFilesStructure` | Allow Grafana to replicate dashboard structure from filesystem. | `false` | +| `sidecar.dashboards.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | +| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` | +| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` | +| `sidecar.dashboards.labelValue` | Label value that config maps with dashboards should have to be added | `nil` | +| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` | +| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` | +| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` | +| `sidecar.dashboards.searchNamespace` | If specified, the sidecar will search for dashboard config-maps inside this namespace. 
Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` | +| `sidecar.dashboards.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | +| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` | +| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` | +| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `nil` | +| `sidecar.datasources.searchNamespace` | If specified, the sidecar will search for datasources config-maps inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` | +| `sidecar.datasources.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | +| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` | +| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` | +| `sidecar.notifiers.searchNamespace` | If specified, the sidecar will search for notifiers config-maps (or secrets) inside this namespace. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces | `nil` | +| `sidecar.notifiers.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | +| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` | +| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` | +| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. 
| `"password"` | +| `admin.existingSecret` | The name of an existing secret containing the admin credentials. | `""` | +| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` | +| `admin.passwordKey` | The key in the existing admin secret containing the password. | `"admin-password"` | +| `serviceAccount.autoMount` | Automount the service account token in the pod| `true` | +| `serviceAccount.annotations` | ServiceAccount annotations | | +| `serviceAccount.create` | Create service account | `true` | +| `serviceAccount.name` | Service account name to use, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` | +| `serviceAccount.nameTest` | Service account name to use for test, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `nil` | +| `rbac.create` | Create and use RBAC resources | `true` | +| `rbac.namespaced` | Creates Role and Rolebinding instead of the default ClusterRole and ClusterRoleBindings for the grafana instance | `false` | +| `rbac.useExistingRole` | Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. | `nil` | +| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `true` | +| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `true` | +| `rbac.extraRoleRules` | Additional rules to add to the Role | [] | +| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] | +| `command` | Define command to be executed by grafana container at startup | `nil` | +| `testFramework.enabled` | Whether to create test-related resources | `true` | +| `testFramework.image` | `test-framework` image repository. | `bats/bats` | +| `testFramework.tag` | `test-framework` image tag. 
| `v1.1.0` | +| `testFramework.imagePullPolicy` | `test-framework` image pull policy. | `IfNotPresent` | +| `testFramework.securityContext` | `test-framework` securityContext | `{}` | +| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` | +| `downloadDashboards.envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` | +| `downloadDashboards.resources` | Resources of `download-dashboards` container | `{}` | +| `downloadDashboardsImage.repository` | Curl docker image repo | `curlimages/curl` | +| `downloadDashboardsImage.tag` | Curl docker image tag | `7.73.0` | +| `downloadDashboardsImage.sha` | Curl docker image sha (optional) | `""` | +| `downloadDashboardsImage.pullPolicy` | Curl docker image pull policy | `IfNotPresent` | +| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) | +| `serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` | +| `serviceMonitor.namespace` | Namespace this servicemonitor is installed in | | +| `serviceMonitor.interval` | How frequently Prometheus should scrape | `1m` | +| `serviceMonitor.path` | Path to scrape | `/metrics` | +| `serviceMonitor.scheme` | Scheme to use for metrics scraping | `http` | +| `serviceMonitor.tlsConfig` | TLS configuration block for the endpoint | `{}` | +| `serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` | +| `serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `30s` | +| `serviceMonitor.relabelings` | MetricRelabelConfigs to apply to samples before ingestion. 
| `[]` | +| `revisionHistoryLimit` | Number of old ReplicaSets to retain | `10` | +| `imageRenderer.enabled` | Enable the image-renderer deployment & service | `false` | +| `imageRenderer.image.repository` | image-renderer Image repository | `grafana/grafana-image-renderer` | +| `imageRenderer.image.tag` | image-renderer Image tag | `latest` | +| `imageRenderer.image.sha` | image-renderer Image sha (optional) | `""` | +| `imageRenderer.image.pullPolicy` | image-renderer ImagePullPolicy | `Always` | +| `imageRenderer.env` | extra env-vars for image-renderer | `{}` | +| `imageRenderer.serviceAccountName` | image-renderer deployment serviceAccountName | `""` | +| `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` | +| `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` | +| `imageRenderer.priorityClassName` | image-renderer deployment priority class | `''` | +| `imageRenderer.service.enabled` | Enable the image-renderer service | `true` | +| `imageRenderer.service.portName` | image-renderer service port name | `'http'` | +| `imageRenderer.service.port` | image-renderer service port used by both service and deployment | `8081` | +| `imageRenderer.grafanaSubPath` | Grafana sub path to use for image renderer callback url | `''` | +| `imageRenderer.podPortName` | name of the image-renderer port on the pod | `http` | +| `imageRenderer.revisionHistoryLimit` | number of image-renderer replica sets to keep | `10` | +| `imageRenderer.networkPolicy.limitIngress` | Enable a NetworkPolicy to limit inbound traffic from only the created grafana pods | `true` | +| `imageRenderer.networkPolicy.limitEgress` | Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods | `false` | +| `imageRenderer.resources` | Set resource limits for image-renderer pods | `{}` | + +### Example ingress with path + +With grafana 6.3 and above +```yaml +grafana.ini: + server: + domain: monitoring.example.com + root_url: 
"%(protocol)s://%(domain)s/grafana" + serve_from_sub_path: true +ingress: + enabled: true + hosts: + - "monitoring.example.com" + path: "/grafana" +``` + +### Example of extraVolumeMounts + +Volume can be type persistentVolumeClaim or hostPath but not both at same time. +If neither existingClaim nor hostPath argument is given then type is emptyDir. + +```yaml +- extraVolumeMounts: + - name: plugins + mountPath: /var/lib/grafana/plugins + subPath: configs/grafana/plugins + existingClaim: existing-grafana-claim + readOnly: false + - name: dashboards + mountPath: /var/lib/grafana/dashboards + hostPath: /usr/shared/grafana/dashboards + readOnly: false +``` + +## Import dashboards + +There are a few methods to import dashboards to Grafana. Below are some examples and explanations as to how to use each method: + +```yaml +dashboards: + default: + some-dashboard: + json: | + { + "annotations": + + ... + # Complete json file here + ... + + "title": "Some Dashboard", + "uid": "abcd1234", + "version": 1 + } + custom-dashboard: + # This is a path to a file inside the dashboards directory inside the chart directory + file: dashboards/custom-dashboard.json + prometheus-stats: + # Ref: https://grafana.com/dashboards/2 + gnetId: 2 + revision: 2 + datasource: Prometheus + local-dashboard: + url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json +``` + +## BASE64 dashboards + +Dashboards could be stored on a server that does not return JSON directly and instead of it returns a Base64 encoded file (e.g. Gerrit) +A new parameter has been added to the url use case so if you specify a b64content value equals to true after the url entry a Base64 decoding is applied before saving the file to disk. +If this entry is not set or is equals to false no decoding is applied to the file before saving it to disk. 
+ +### Gerrit use case + +Gerrit API for download files has the following schema: where {project-name} and +{file-id} usually has '/' in their values and so they MUST be replaced by %2F so if project-name is user/repo, branch-id is master and file-id is equals to dir1/dir2/dashboard +the url value is + +## Sidecar for dashboards + +If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana +pod. This container watches all configmaps (or secrets) in the cluster and filters out the ones with +a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written +to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported +dashboards are deleted/updated. + +A recommendation is to use one configmap per dashboard, as a reduction of multiple dashboards inside +one configmap is currently not properly mirrored in grafana. + +Example dashboard config: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: sample-grafana-dashboard + labels: + grafana_dashboard: "1" +data: + k8s-dashboard.json: |- + [...] +``` + +## Sidecar for datasources + +If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana +pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and +filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in +those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, +the data sources in grafana can be imported. + +Secrets are recommended over configmaps for this usecase because datasources usually contain private +data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those. 
+ +Example values to add a datasource adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file): + +```yaml +datasources: + datasources.yaml: + apiVersion: 1 + datasources: + # name of the datasource. Required + - name: Graphite + # datasource type. Required + type: graphite + # access mode. proxy or direct (Server or Browser in the UI). Required + access: proxy + # org id. will default to orgId 1 if not specified + orgId: 1 + # url + url: http://localhost:8080 + # database password, if used + password: + # database user, if used + user: + # database name, if used + database: + # enable/disable basic auth + basicAuth: + # basic auth username + basicAuthUser: + # basic auth password + basicAuthPassword: + # enable/disable with credentials headers + withCredentials: + # mark as default datasource. Max one per org + isDefault: + # fields that will be converted to json and stored in json_data + jsonData: + graphiteVersion: "1.1" + tlsAuth: true + tlsAuthWithCACert: true + # json object of data that will be encrypted. + secureJsonData: + tlsCACert: "..." + tlsClientCert: "..." + tlsClientKey: "..." + version: 1 + # allow users to edit datasources from the UI. + editable: false +``` + +## Sidecar for notifiers + +If the parameter `sidecar.notifiers.enabled` is set, an init container is deployed in the grafana +pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and +filters out the ones with a label as defined in `sidecar.notifiers.label`. The files defined in +those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, +the notification channels in grafana can be imported. The secrets must be created before +`helm install` so that the notifiers init container can list the secrets. + +Secrets are recommended over configmaps for this usecase because alert notification channels usually contain +private data like SMTP usernames and passwords. 
Secrets are the more appropriate cluster resource to manage those. + +Example datasource config adapted from [Grafana](https://grafana.com/docs/grafana/latest/administration/provisioning/#alert-notification-channels): + +```yaml +notifiers: + - name: notification-channel-1 + type: slack + uid: notifier1 + # either + org_id: 2 + # or + org_name: Main Org. + is_default: true + send_reminder: true + frequency: 1h + disable_resolve_message: false + # See `Supported Settings` section for settings supporter for each + # alert notification type. + settings: + recipient: 'XXX' + token: 'xoxb' + uploadImage: true + url: https://slack.com + +delete_notifiers: + - name: notification-channel-1 + uid: notifier1 + org_id: 2 + - name: notification-channel-2 + # default org_id: 1 +``` + +## How to serve Grafana with a path prefix (/grafana) + +In order to serve Grafana with a prefix (e.g., ), add the following to your values.yaml. + +```yaml +ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/use-regex: "true" + + path: /grafana/?(.*) + hosts: + - k8s.example.dev + +grafana.ini: + server: + root_url: http://localhost:3000/grafana # this host can be localhost +``` + +## How to securely reference secrets in grafana.ini + +This example uses Grafana uses [file providers](https://grafana.com/docs/grafana/latest/administration/configuration/#file-provider) for secret values and the `extraSecretMounts` configuration flag (Additional grafana server secret mounts) to mount the secrets. 
+ +In grafana.ini: + +```yaml +grafana.ini: + [auth.generic_oauth] + enabled = true + client_id = $__file{/etc/secrets/auth_generic_oauth/client_id} + client_secret = $__file{/etc/secrets/auth_generic_oauth/client_secret} +``` + +Existing secret, or created along with helm: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: auth-generic-oauth-secret +type: Opaque +stringData: + client_id: + client_secret: +``` + +Include in the `extraSecretMounts` configuration flag: + +```yaml +- extraSecretMounts: + - name: auth-generic-oauth-secret-mount + secretName: auth-generic-oauth-secret + defaultMode: 0440 + mountPath: /etc/secrets/auth_generic_oauth + readOnly: true +``` + +### extraSecretMounts using a Container Storage Interface (CSI) provider + +This example uses a CSI driver e.g. retrieving secrets using [Azure Key Vault Provider](https://github.com/Azure/secrets-store-csi-driver-provider-azure) + +```yaml +- extraSecretMounts: + - name: secrets-store-inline + mountPath: /run/secrets + readOnly: true + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: "my-provider" + nodePublishSecretRef: + name: akv-creds +``` + +## Image Renderer Plug-In + +This chart supports enabling [remote image rendering](https://github.com/grafana/grafana-image-renderer/blob/master/docs/remote_rendering_using_docker.md) + +```yaml +imageRenderer: + enabled: true +``` + +### Image Renderer NetworkPolicy + +By default the image-renderer pods will have a network policy which only allows ingress traffic from the created grafana instance diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/NOTES.txt b/charts/k10/k10/4.5.1400/charts/grafana/templates/NOTES.txt new file mode 100644 index 000000000..ca7d88e3d --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/NOTES.txt @@ -0,0 +1,54 @@ +1. 
Get your '{{ .Values.adminUser }}' user password by running: + + kubectl get secret --namespace {{ template "grafana.namespace" . }} {{ template "grafana.fullname" . }} -o jsonpath="{.data.admin-password}" | base64 --decode ; echo + +2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: + + {{ template "grafana.fullname" . }}.{{ template "grafana.namespace" . }}.svc.cluster.local +{{ if .Values.ingress.enabled }} + If you bind grafana to 80, please update values in values.yaml and reinstall: + ``` + securityContext: + runAsUser: 0 + runAsGroup: 0 + fsGroup: 0 + + command: + - "setcap" + - "'cap_net_bind_service=+ep'" + - "/usr/sbin/grafana-server &&" + - "sh" + - "/run.sh" + ``` + For details, refer to https://grafana.com/docs/installation/configuration/#http-port. + Otherwise Grafana will crash on startup. + + From outside the cluster, the server URL(s) are: +{{- range .Values.ingress.hosts }} + http://{{ . }} +{{- end }} +{{ else }} + Get the Grafana URL to visit by running these commands in the same shell: +{{ if contains "NodePort" .Values.service.type -}} + export NODE_PORT=$(kubectl get --namespace {{ template "grafana.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ template "grafana.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{ else if contains "LoadBalancer" .Values.service.type -}} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get svc --namespace {{ template "grafana.namespace" . }} -w {{ template "grafana.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ template "grafana.namespace" . }} {{ template "grafana.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + http://$SERVICE_IP:{{ .Values.service.port -}} +{{ else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ template "grafana.namespace" . }} -l "app={{ template "grafana.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ template "grafana.namespace" . }} port-forward $POD_NAME 3000 +{{- end }} +{{- end }} + +3. Login with the password from step 1 and the username: {{ .Values.adminUser }} + +{{- if not .Values.global.persistence.enabled }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the Grafana pod is terminated. ##### +################################################################################# +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/_definitions.tpl b/charts/k10/k10/4.5.1400/charts/grafana/templates/_definitions.tpl new file mode 100644 index 000000000..dfea18ae0 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/_definitions.tpl @@ -0,0 +1,3 @@ +{{/* Autogenerated, do NOT modify */}} +{{- define "k10.grafanaImageTag" -}}8.1.8{{- end -}} +{{- define "k10.grafanaInitContainerImageTag" -}}8.5-240.1648458092{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/_helpers.tpl b/charts/k10/k10/4.5.1400/charts/grafana/templates/_helpers.tpl new file mode 100644 index 000000000..aea79b673 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/_helpers.tpl @@ -0,0 +1,235 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "grafana.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "grafana.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "grafana.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account +*/}} +{{- define "grafana.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "grafana.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{- define "grafana.serviceAccountNameTest" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (print (include "grafana.fullname" .) "-test") .Values.serviceAccount.nameTest }} +{{- else -}} + {{ default "default" .Values.serviceAccount.nameTest }} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "grafana.namespace" -}} + {{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} + {{- else -}} + {{- .Release.Namespace -}} + {{- end -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "grafana.labels" -}} +helm.sh/chart: {{ include "grafana.chart" . }} +{{ include "grafana.selectorLabels" . 
}} +{{- if or .Chart.AppVersion .Values.image.tag }} +app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- if .Values.extraLabels }} +{{ toYaml .Values.extraLabels }} +{{- end }} +{{- end -}} + +{{/* +Selector labels +*/}} +{{- define "grafana.selectorLabels" -}} +app: {{ include "grafana.name" . }} +release: {{ .Release.Name }} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "grafana.imageRenderer.labels" -}} +helm.sh/chart: {{ include "grafana.chart" . }} +{{ include "grafana.imageRenderer.selectorLabels" . }} +{{- if or .Chart.AppVersion .Values.image.tag }} +app.kubernetes.io/version: {{ .Values.image.tag | default .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Selector labels ImageRenderer +*/}} +{{- define "grafana.imageRenderer.selectorLabels" -}} +app: {{ include "grafana.name" . }}-image-renderer +release: {{ .Release.Name }} +{{- end -}} + +{{/* +Looks if there's an existing secret and reuse its password. If not it generates +new password and use it. +*/}} +{{- define "grafana.password" -}} +{{- $secret := (lookup "v1" "Secret" (include "grafana.namespace" .) (include "grafana.fullname" .) ) -}} + {{- if $secret -}} + {{- index $secret "data" "admin-password" -}} + {{- else -}} + {{- (randAlphaNum 40) | b64enc | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for rbac. +*/}} +{{- define "grafana.rbac.apiVersion" -}} + {{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} + {{- print "rbac.authorization.k8s.io/v1" -}} + {{- else -}} + {{- print "rbac.authorization.k8s.io/v1beta1" -}} + {{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "grafana.ingress.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) -}} + {{- print "networking.k8s.io/v1" -}} + {{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}} + {{- print "networking.k8s.io/v1beta1" -}} + {{- else -}} + {{- print "extensions/v1beta1" -}} + {{- end -}} +{{- end -}} + +{{/* +Return if ingress is stable. +*/}} +{{- define "grafana.ingress.isStable" -}} + {{- eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1" -}} +{{- end -}} + +{{/* +Return if ingress supports ingressClassName. +*/}} +{{- define "grafana.ingress.supportsIngressClassName" -}} + {{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}} +{{- end -}} + +{{/* +Return if ingress supports pathType. +*/}} +{{- define "grafana.ingress.supportsPathType" -}} + {{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) -}} +{{- end -}} + +{{/* +Figure out the grafana image tag +based on the value of global.upstreamCertifiedImages +*/}} +{{- define "get.grafanaImageTag"}} +{{- if .Values.global.airgapped.repository }} +{{- printf "k10-%s" (include "k10.grafanaImageTag" .) }} +{{- else }} +{{- printf "%s" (include "k10.grafanaImageTag" .) 
}} +{{- end }} +{{- end }} + +{{- define "get.grafanaImageRepo" }} +{{- if .Values.global.upstreamCertifiedImages }} +{{- printf "%s/%s/grafana" .Values.k10image.registry .Values.k10image.repository }} +{{- else }} +{{- print .Values.image.repository }} +{{- end }} +{{- end }} + +{{/* +Figure out the config based on +the value of airgapped.repository +*/}} +{{- define "get.grafanaServerimage" }} +{{- if not .Values.global.rhMarketPlace }} +{{- if .Values.global.airgapped.repository }} +{{- printf "%s/grafana:%s" .Values.global.airgapped.repository (include "get.grafanaImageTag" .) }} +{{- else }} +{{- printf "%s:%s" (include "get.grafanaImageRepo" .) (include "get.grafanaImageTag" .) }} +{{- end }} +{{- else }} +{{- printf "%s" .Values.global.images.grafana }} +{{- end -}} +{{- end }} + +{{/* +Figure out the grafana init container busy box image tag +based on the value of global.airgapped.repository +*/}} +{{- define "get.grafanaInitContainerImageTag"}} +{{- if .Values.global.airgapped.repository }} +{{- printf "k10-%s" (include "k10.grafanaInitContainerImageTag" .) }} +{{- else }} +{{- printf "%s" (include "k10.grafanaInitContainerImageTag" .) }} +{{- end }} +{{- end }} + +{{- define "get.grafanaInitContainerImageRepo" }} +{{- if .Values.global.upstreamCertifiedImages }} +{{- printf "%s/%s/ubi-minimal" .Values.k10image.registry .Values.k10image.repository }} +{{- else }} +{{- print .Values.ubi.image.repository }} +{{- end }} +{{- end }} + +{{/* +Figure out the config based on +the value of airgapped.repository +*/}} +{{- define "get.grafanaInitContainerImage" }} +{{- if not .Values.global.rhMarketPlace }} +{{- if .Values.global.airgapped.repository }} +{{- printf "%s/ubi-minimal:%s" .Values.global.airgapped.repository (include "get.grafanaInitContainerImageTag" .) }} +{{- else }} +{{- printf "%s:%s" (include "get.grafanaInitContainerImageRepo" .) (include "get.grafanaInitContainerImageTag" .) 
}} +{{- end }} +{{- else }} +{{- printf "%s:%s" (include "get.grafanaInitContainerImageRepo" .) (include "get.grafanaInitContainerImageTag" .) }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/_pod.tpl b/charts/k10/k10/4.5.1400/charts/grafana/templates/_pod.tpl new file mode 100644 index 000000000..46cee7d64 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/_pod.tpl @@ -0,0 +1,509 @@ + +{{- define "grafana.pod" -}} +{{- if .Values.schedulerName }} +schedulerName: "{{ .Values.schedulerName }}" +{{- end }} +serviceAccountName: {{ template "grafana.serviceAccountName" . }} +automountServiceAccountToken: {{ .Values.serviceAccount.autoMount }} +{{- if .Values.securityContext }} +securityContext: +{{ toYaml .Values.securityContext | indent 2 }} +{{- end }} +{{- if .Values.hostAliases }} +hostAliases: +{{ toYaml .Values.hostAliases | indent 2 }} +{{- end }} +{{- if .Values.priorityClassName }} +priorityClassName: {{ .Values.priorityClassName }} +{{- end }} +{{- if ( or .Values.global.persistence.enabled .Values.dashboards .Values.sidecar.datasources.enabled .Values.sidecar.notifiers.enabled .Values.extraInitContainers) }} +initContainers: +{{- end }} +{{- if ( and .Values.global.persistence.enabled .Values.initChownData.enabled ) }} + - name: init-chown-data + image: "{{ include "get.grafanaInitContainerImage" . 
}}" + imagePullPolicy: {{ .Values.ubi.image.pullPolicy }} + securityContext: + runAsNonRoot: false + runAsUser: 0 + command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsGroup }}", "/var/lib/grafana"] + resources: +{{ toYaml .Values.initChownData.resources | indent 6 }} + volumeMounts: + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} +{{- end }} +{{- if .Values.dashboards }} + - name: download-dashboards + {{- if .Values.downloadDashboardsImage.sha }} + image: "{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}@sha256:{{ .Values.downloadDashboardsImage.sha }}" + {{- else }} + image: "{{ include "get.grafanaInitContainerImage" . }}" + {{- end }} + imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} + command: ["/bin/sh"] + args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh -x /etc/grafana/download_dashboards.sh" ] + resources: +{{ toYaml .Values.downloadDashboards.resources | indent 6 }} + env: +{{- range $key, $value := .Values.downloadDashboards.env }} + - name: "{{ $key }}" + value: "{{ $value }}" +{{- end }} +{{- if .Values.downloadDashboards.envFromSecret }} + envFrom: + - secretRef: + name: {{ tpl .Values.downloadDashboards.envFromSecret . }} +{{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/download_dashboards.sh" + subPath: download_dashboards.sh + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} +{{- end }} +{{- if .Values.sidecar.datasources.enabled }} + - name: {{ template "grafana.name" . 
}}-sc-datasources + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + - name: METHOD + value: LIST + - name: LABEL + value: "{{ .Values.sidecar.datasources.label }}" + {{- if .Values.sidecar.datasources.labelValue }} + - name: LABEL_VALUE + value: {{ quote .Values.sidecar.datasources.labelValue }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/datasources" + - name: RESOURCE + value: {{ quote .Values.sidecar.datasources.resource }} + {{- if .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- if .Values.sidecar.datasources.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.sidecar.datasources.searchNamespace }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + resources: +{{ toYaml .Values.sidecar.resources | indent 6 }} + volumeMounts: + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} +{{- if .Values.sidecar.notifiers.enabled }} + - name: {{ template "grafana.name" . 
}}-sc-notifiers + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + - name: METHOD + value: LIST + - name: LABEL + value: "{{ .Values.sidecar.notifiers.label }}" + - name: FOLDER + value: "/etc/grafana/provisioning/notifiers" + - name: RESOURCE + value: {{ quote .Values.sidecar.notifiers.resource }} + {{- if .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- if .Values.sidecar.notifiers.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.sidecar.notifiers.searchNamespace }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + resources: +{{ toYaml .Values.sidecar.resources | indent 6 }} + volumeMounts: + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" +{{- end}} +{{- if .Values.extraInitContainers }} +{{ toYaml .Values.extraInitContainers | indent 2 }} +{{- end }} +{{- if (or .Values.global.imagePullSecret .Values.image.pullSecrets) }} +imagePullSecrets: +{{- if .Values.global.imagePullSecret }} + - name: {{ .Values.global.imagePullSecret }} +{{- end }} +{{- if .Values.image.pullSecrets }} +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end }} +{{- end }} +enableServiceLinks: {{ .Values.enableServiceLinks }} +containers: +{{- if .Values.sidecar.dashboards.enabled }} + - name: {{ template "grafana.name" . 
}}-sc-dashboard + {{- if .Values.sidecar.image.sha }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + - name: METHOD + value: {{ .Values.sidecar.dashboards.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.dashboards.label }}" + {{- if .Values.sidecar.dashboards.labelValue }} + - name: LABEL_VALUE + value: {{ quote .Values.sidecar.dashboards.labelValue }} + {{- end }} + - name: FOLDER + value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}" + - name: RESOURCE + value: {{ quote .Values.sidecar.dashboards.resource }} + {{- if .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- if .Values.sidecar.dashboards.searchNamespace }} + - name: NAMESPACE + value: "{{ .Values.sidecar.dashboards.searchNamespace }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + {{- if .Values.sidecar.dashboards.folderAnnotation }} + - name: FOLDER_ANNOTATION + value: "{{ .Values.sidecar.dashboards.folderAnnotation }}" + {{- end }} + resources: +{{ toYaml .Values.sidecar.resources | indent 6 }} + volumeMounts: + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} +{{- end}} + - name: {{ .Chart.Name }} + {{- if .Values.image.sha }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}@sha256:{{ .Values.image.sha }}" + {{- else }} + image: "{{ include "get.grafanaServerimage" . }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.command }} + command: + {{- range .Values.command }} + - {{ . 
}} + {{- end }} + {{- end}} +{{- if .Values.containerSecurityContext }} + securityContext: +{{- toYaml .Values.containerSecurityContext | nindent 6 }} +{{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/grafana.ini" + subPath: grafana.ini + {{- if .Values.ldap.enabled }} + - name: ldap + mountPath: "/etc/grafana/ldap.toml" + subPath: ldap.toml + {{- end }} + {{- range .Values.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + - name: storage + mountPath: "/var/lib/grafana" +{{- if .Values.persistence.subPath }} + subPath: {{ .Values.persistence.subPath }} +{{- end }} +{{- if .Values.dashboards }} +{{- range $provider, $dashboards := .Values.dashboards }} +{{- range $key, $value := $dashboards }} +{{- if (or (hasKey $value "json") (hasKey $value "file")) }} + - name: dashboards-{{ $provider }} + mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" + subPath: "{{ $key }}.json" +{{- end }} +{{- end }} +{{- end }} +{{- end -}} +{{- if .Values.dashboardsConfigMaps }} +{{- range (keys .Values.dashboardsConfigMaps | sortAlpha) }} + - name: dashboards-{{ . }} + mountPath: "/var/lib/grafana/dashboards/{{ . 
}}" +{{- end }} +{{- end }} +{{/* Mounting default datasources in pod as yaml */}} + - name: config + mountPath: "/etc/grafana/provisioning/datasources/datasources.yaml" + subPath: datasources.yaml +{{- if .Values.notifiers }} + - name: config + mountPath: "/etc/grafana/provisioning/notifiers/notifiers.yaml" + subPath: notifiers.yaml +{{- end }} +{{- if .Values.dashboardProviders }} + - name: config + mountPath: "/etc/grafana/provisioning/dashboards/dashboardproviders.yaml" + subPath: dashboardproviders.yaml +{{- end }} +{{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} +{{ if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" + subPath: provider.yaml +{{- end}} +{{- end}} +{{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} +{{- if .Values.sidecar.notifiers.enabled }} + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" +{{- end}} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + subPath: {{ .subPath | default "" }} + {{- end }} + {{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + {{- end }} + ports: + - name: {{ .Values.service.portName }} + containerPort: {{ .Values.service.port }} + protocol: TCP + - name: {{ .Values.podPortName }} + containerPort: 3000 + protocol: TCP + env: + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: GF_SECURITY_ADMIN_USER + valueFrom: + secretKeyRef: + name: {{ 
.Values.admin.existingSecret | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: GF_SECURITY_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if .Values.plugins }} + - name: GF_INSTALL_PLUGINS + valueFrom: + configMapKeyRef: + name: {{ template "grafana.fullname" . }} + key: plugins + {{- end }} + {{- if .Values.smtp.existingSecret }} + - name: GF_SMTP_USER + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.userKey | default "user" }} + - name: GF_SMTP_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.passwordKey | default "password" }} + {{- end }} + {{ if .Values.imageRenderer.enabled }} + - name: GF_RENDERING_SERVER_URL + value: http://{{ template "grafana.fullname" . }}-image-renderer.{{ template "grafana.namespace" . }}:{{ .Values.imageRenderer.service.port }}/render + - name: GF_RENDERING_CALLBACK_URL + value: http://{{ template "grafana.fullname" . }}.{{ template "grafana.namespace" . 
}}:{{ .Values.service.port }}/{{ .Values.imageRenderer.grafanaSubPath }} + {{ end }} + - name: GF_PATHS_DATA + value: {{ (get .Values "grafana.ini").paths.data }} + - name: GF_PATHS_LOGS + value: {{ (get .Values "grafana.ini").paths.logs }} + - name: GF_PATHS_PLUGINS + value: {{ (get .Values "grafana.ini").paths.plugins }} + - name: GF_PATHS_PROVISIONING + value: {{ (get .Values "grafana.ini").paths.provisioning }} + {{- range $key, $value := .Values.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: +{{ toYaml $value | indent 10 }} + {{- end }} +{{- range $key, $value := .Values.env }} + - name: "{{ tpl $key $ }}" + value: "{{ tpl (print $value) $ }}" +{{- end }} + {{- if .Values.envFromSecret }} + envFrom: + - secretRef: + name: {{ tpl .Values.envFromSecret . }} + {{- end }} + {{- if .Values.envRenderSecret }} + envFrom: + - secretRef: + name: {{ template "grafana.fullname" . }}-env + {{- end }} + livenessProbe: +{{ toYaml .Values.livenessProbe | indent 6 }} + readinessProbe: +{{ toYaml .Values.readinessProbe | indent 6 }} + resources: +{{ toYaml .Values.resources | indent 6 }} +{{- with .Values.extraContainers }} +{{ tpl . $ | indent 2 }} +{{- end }} +{{- with .Values.nodeSelector }} +nodeSelector: +{{ toYaml . | indent 2 }} +{{- end }} +{{- with .Values.affinity }} +affinity: +{{ toYaml . | indent 2 }} +{{- end }} +{{- with .Values.tolerations }} +tolerations: +{{ toYaml . | indent 2 }} +{{- end }} +volumes: + - name: config + configMap: + name: {{ template "grafana.fullname" . }} +{{- range .Values.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} +{{- end }} + {{- if .Values.dashboards }} + {{- range (keys .Values.dashboards | sortAlpha) }} + - name: dashboards-{{ . }} + configMap: + name: {{ template "grafana.fullname" $ }}-dashboards-{{ . }} + {{- end }} + {{- end }} + {{- if .Values.dashboardsConfigMaps }} + {{ $root := . 
}} + {{- range $provider, $name := .Values.dashboardsConfigMaps }} + - name: dashboards-{{ $provider }} + configMap: + name: {{ tpl $name $root }} + {{- end }} + {{- end }} + {{- if .Values.ldap.enabled }} + - name: ldap + secret: + {{- if .Values.ldap.existingSecret }} + secretName: {{ .Values.ldap.existingSecret }} + {{- else }} + secretName: {{ template "grafana.fullname" . }} + {{- end }} + items: + - key: ldap-toml + path: ldap.toml + {{- end }} +{{- if and .Values.global.persistence.enabled (eq .Values.persistence.type "pvc") }} + - name: storage + persistentVolumeClaim: + claimName: {{ .Values.persistence.existingClaim | default (include "grafana.fullname" .) }} +{{- else if and .Values.global.persistence.enabled (eq .Values.persistence.type "statefulset") }} +# nothing +{{- else }} + - name: storage +{{- if .Values.persistence.inMemory.enabled }} + emptyDir: + medium: Memory +{{- if .Values.persistence.inMemory.sizeLimit }} + sizeLimit: {{ .Values.persistence.inMemory.sizeLimit }} +{{- end -}} +{{- else }} + emptyDir: {} +{{- end -}} +{{- end -}} +{{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + emptyDir: {} +{{- if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + configMap: + name: {{ template "grafana.fullname" . 
}}-config-dashboards +{{- end }} +{{- end }} +{{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + emptyDir: {} +{{- end -}} +{{- if .Values.sidecar.notifiers.enabled }} + - name: sc-notifiers-volume + emptyDir: {} +{{- end -}} +{{- range .Values.extraSecretMounts }} +{{- if .secretName }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + defaultMode: {{ .defaultMode }} +{{- else if .projected }} + - name: {{ .name }} + projected: {{- toYaml .projected | nindent 6 }} +{{- else if .csi }} + - name: {{ .name }} + csi: {{- toYaml .csi | nindent 6 }} +{{- end }} +{{- end }} +{{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + {{- if .existingClaim }} + persistentVolumeClaim: + claimName: {{ .existingClaim }} + {{- else if .hostPath }} + hostPath: + path: {{ .hostPath }} + {{- else }} + emptyDir: {} + {{- end }} +{{- end }} +{{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + emptyDir: {} +{{- end -}} +{{- if .Values.extraContainerVolumes }} +{{ toYaml .Values.extraContainerVolumes | indent 2 }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/clusterrole.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/clusterrole.yaml new file mode 100644 index 000000000..6d2aa55c9 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/clusterrole.yaml @@ -0,0 +1,27 @@ +{{- if .Values.enabled }} +{{- if and .Values.rbac.create (not .Values.rbac.namespaced) (not .Values.rbac.useExistingRole) }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + name: {{ template "grafana.fullname" . 
}}-clusterrole +{{- if or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraClusterRoleRules) }} +rules: +{{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled }} +- apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +{{- end}} +{{- with .Values.rbac.extraClusterRoleRules }} +{{ toYaml . | indent 0 }} +{{- end}} +{{- else }} +rules: [] +{{- end}} +{{- end}} +{{- end}} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/clusterrolebinding.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/clusterrolebinding.yaml new file mode 100644 index 000000000..5e50cd7fe --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/clusterrolebinding.yaml @@ -0,0 +1,26 @@ +{{- if .Values.enabled }} +{{- if and .Values.rbac.create (not .Values.rbac.namespaced) }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "grafana.fullname" . }}-clusterrolebinding + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +subjects: + - kind: ServiceAccount + name: {{ template "grafana.serviceAccountName" . }} + namespace: {{ template "grafana.namespace" . }} +roleRef: + kind: ClusterRole +{{- if (not .Values.rbac.useExistingRole) }} + name: {{ template "grafana.fullname" . 
}}-clusterrole +{{- else }} + name: {{ .Values.rbac.useExistingRole }} +{{- end }} + apiGroup: rbac.authorization.k8s.io +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/configmap-dashboard-provider.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/configmap-dashboard-provider.yaml new file mode 100644 index 000000000..c3dcc0810 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/configmap-dashboard-provider.yaml @@ -0,0 +1,31 @@ +{{- if .Values.enabled }} +{{- if .Values.sidecar.dashboards.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + name: {{ template "grafana.fullname" . }}-config-dashboards + namespace: {{ template "grafana.namespace" . }} +data: + provider.yaml: |- + apiVersion: 1 + providers: + - name: '{{ .Values.sidecar.dashboards.provider.name }}' + orgId: {{ .Values.sidecar.dashboards.provider.orgid }} + {{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} + folder: '{{ .Values.sidecar.dashboards.provider.folder }}' + {{- end}} + type: {{ .Values.sidecar.dashboards.provider.type }} + disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }} + allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }} + updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }} + options: + foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} + path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . 
}}{{- end }} +{{- end}} +{{- end}} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/configmap.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/configmap.yaml new file mode 100644 index 000000000..6bbfaeb52 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/configmap.yaml @@ -0,0 +1,99 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +data: + # Adding default prometheus datasource for grafana + datasources.yaml: | + apiVersion: 1 + datasources: + - access: proxy + editable: false + isDefault: true + name: Prometheus + type: prometheus + url: http://{{ .Values.prometheusName | trimSuffix "/" }}-exp/{{ .Values.prometheusPrefixURL | trimPrefix "/"}} + jsonData: + timeInterval: '1m' +{{- if .Values.plugins }} + plugins: {{ join "," .Values.plugins }} +{{- end }} + grafana.ini: | +{{- range $key, $value := index .Values "grafana.ini" }} + [{{ $key }}] + {{- range $elem, $elemVal := $value }} + {{- if kindIs "invalid" $elemVal }} + {{ $elem }} = + {{- else if kindIs "string" $elemVal }} + {{ $elem }} = {{ tpl $elemVal $ }} + {{- else }} + {{ $elem }} = {{ $elemVal }} + {{- end }} + {{- end }} +{{- end }} + [server] + root_url=/{{ include "k10.ingressPath" . | trimSuffix "/"}}/grafana + serve_from_sub_path=true + +{{- if .Values.datasources }} +{{ $root := . 
}} + {{- range $key, $value := .Values.datasources }} + {{ $key }}: | +{{ tpl (toYaml $value | indent 4) $root }} + {{- end -}} +{{- end -}} + +{{- if .Values.notifiers }} + {{- range $key, $value := .Values.notifiers }} + {{ $key }}: | +{{ toYaml $value | indent 4 }} + {{- end -}} +{{- end -}} + +{{- if .Values.dashboardProviders }} + {{- range $key, $value := .Values.dashboardProviders }} + {{ $key }}: | +{{ toYaml $value | indent 4 }} + {{- end -}} +{{- end -}} + +{{- if .Values.dashboards }} + download_dashboards.sh: | + #!/usr/bin/env sh + set -euf + {{- if .Values.dashboardProviders }} + {{- range $key, $value := .Values.dashboardProviders }} + {{- range $value.providers }} + mkdir -p {{ .options.path }} + {{- end }} + {{- end }} + {{- end }} + + {{- range $provider, $dashboards := .Values.dashboards }} + {{- range $key, $value := $dashboards }} + {{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }} + curl -skf \ + --connect-timeout 60 \ + --max-time 60 \ + {{- if not $value.b64content }} + -H "Accept: application/json" \ + {{- if $value.token }} + -H "Authorization: token {{ $value.token }}" \ + {{- end }} + -H "Content-Type: application/json;charset=UTF-8" \ + {{ end }} + {{- if $value.url -}}"{{ $value.url }}"{{- else -}}"https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download"{{- end -}}{{ if $value.datasource }} | sed '/-- .* --/! 
s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g'{{ end }}{{- if $value.b64content -}} | base64 -d {{- end -}} \ + > "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" + {{- end -}} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/dashboards-json-configmap.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/dashboards-json-configmap.yaml new file mode 100644 index 000000000..232cd5a5e --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/dashboards-json-configmap.yaml @@ -0,0 +1,37 @@ +{{- if .Values.enabled }} +{{- if .Values.dashboards }} +{{ $files := .Files }} +{{- range $provider, $dashboards := .Values.dashboards }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "grafana.fullname" $ }}-dashboards-{{ $provider }} + namespace: {{ template "grafana.namespace" $ }} + labels: + {{- include "grafana.labels" $ | nindent 4 }} + dashboard-provider: {{ $provider }} +{{- if $dashboards }} +data: +{{- $dashboardFound := false }} +{{- range $key, $value := $dashboards }} +{{- if (or (hasKey $value "json") (hasKey $value "file")) }} +{{- $dashboardFound = true }} +{{ print $key | indent 2 }}.json: +{{- if hasKey $value "json" }} + |- +{{ $value.json | indent 6 }} +{{- end }} +{{- if hasKey $value "file" }} +{{ toYaml ( $files.Get $value.file ) | indent 4}} +{{- end }} +{{- end }} +{{- end }} +{{- if not $dashboardFound }} + {} +{{- end }} +{{- end }} +--- +{{- end }} + +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/deployment.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/deployment.yaml new file mode 100644 index 000000000..21395889a --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/deployment.yaml @@ -0,0 +1,52 @@ +{{- if .Values.enabled }} +{{ if (or (not .Values.global.persistence.enabled) (eq .Values.persistence.type "pvc")) }} +apiVersion: apps/v1 +kind: Deployment +metadata: + 
name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- if .Values.labels }} +{{ toYaml .Values.labels | indent 4 }} +{{- end }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicas }} + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} +{{- with .Values.deploymentStrategy }} + strategy: +{{ toYaml . | trim | indent 4 }} +{{- end }} + template: + metadata: + labels: + {{- include "grafana.selectorLabels" . | nindent 8 }} +{{- with .Values.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} +{{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} +{{- end }} +{{- if .Values.envRenderSecret }} + checksum/secret-env: {{ include (print $.Template.BasePath "/secret-env.yaml") . | sha256sum }} +{{- end }} +{{- with .Values.podAnnotations }} +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- include "grafana.pod" . 
| nindent 6 }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/headless-service.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/headless-service.yaml new file mode 100644 index 000000000..4715281ab --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/headless-service.yaml @@ -0,0 +1,20 @@ +{{- if .Values.enabled }} +{{- if and .Values.global.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset")}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "grafana.fullname" . }}-headless + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + clusterIP: None + selector: + {{- include "grafana.selectorLabels" . | nindent 4 }} + type: ClusterIP +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/hpa.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/hpa.yaml new file mode 100644 index 000000000..b4e610c6c --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/hpa.yaml @@ -0,0 +1,22 @@ +{{- if .Values.enabled }} +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "grafana.fullname" . }} + labels: + app: {{ template "grafana.name" . }} + helm.sh/chart: {{ template "grafana.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ template "grafana.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: +{{ toYaml .Values.autoscaling.metrics | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/image-renderer-deployment.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/image-renderer-deployment.yaml new file mode 100644 index 000000000..5fed1a5f1 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/image-renderer-deployment.yaml @@ -0,0 +1,117 @@ +{{- if .Values.enabled }} +{{ if .Values.imageRenderer.enabled }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "grafana.fullname" . }}-image-renderer + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} +{{- if .Values.imageRenderer.labels }} +{{ toYaml .Values.imageRenderer.labels | indent 4 }} +{{- end }} +{{- with .Values.imageRenderer.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.imageRenderer.replicas }} + revisionHistoryLimit: {{ .Values.imageRenderer.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} +{{- with .Values.imageRenderer.deploymentStrategy }} + strategy: +{{ toYaml . | trim | indent 4 }} +{{- end }} + template: + metadata: + labels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 8 }} +{{- with .Values.imageRenderer.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} +{{- with .Values.imageRenderer.podAnnotations }} +{{ toYaml . 
| indent 8 }} +{{- end }} + spec: + + {{- if .Values.imageRenderer.schedulerName }} + schedulerName: "{{ .Values.imageRenderer.schedulerName }}" + {{- end }} + {{- if .Values.imageRenderer.serviceAccountName }} + serviceAccountName: "{{ .Values.imageRenderer.serviceAccountName }}" + {{- end }} + {{- if .Values.imageRenderer.securityContext }} + securityContext: + {{- toYaml .Values.imageRenderer.securityContext | nindent 8 }} + {{- end }} + {{- if .Values.imageRenderer.hostAliases }} + hostAliases: + {{- toYaml .Values.imageRenderer.hostAliases | nindent 8 }} + {{- end }} + {{- if .Values.imageRenderer.priorityClassName }} + priorityClassName: {{ .Values.imageRenderer.priorityClassName }} + {{- end }} + {{- if .Values.imageRenderer.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.imageRenderer.image.pullSecrets }} + - name: {{ . }} + {{- end}} + {{- end }} + containers: + - name: {{ .Chart.Name }}-image-renderer + {{- if .Values.imageRenderer.image.sha }} + image: "{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}@sha256:{{ .Values.imageRenderer.image.sha }}" + {{- else }} + image: "{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.imageRenderer.image.pullPolicy }} + {{- if .Values.imageRenderer.command }} + command: + {{- range .Values.imageRenderer.command }} + - {{ . 
}} + {{- end }} + {{- end}} + ports: + - name: {{ .Values.imageRenderer.service.portName }} + containerPort: {{ .Values.imageRenderer.service.port }} + protocol: TCP + env: + - name: HTTP_PORT + value: {{ .Values.imageRenderer.service.port | quote }} + {{- range $key, $value := .Values.imageRenderer.env }} + - name: {{ $key | quote }} + value: {{ $value | quote }} + {{- end }} + securityContext: + capabilities: + drop: ['all'] + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /tmp + name: image-renderer-tmpfs + {{- with .Values.imageRenderer.resources }} + resources: +{{ toYaml . | indent 12 }} + {{- end }} + {{- with .Values.imageRenderer.nodeSelector }} + nodeSelector: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.imageRenderer.affinity }} + affinity: +{{ toYaml . | indent 8 }} + {{- end }} + {{- with .Values.imageRenderer.tolerations }} + tolerations: +{{ toYaml . | indent 8 }} + {{- end }} + volumes: + - name: image-renderer-tmpfs + emptyDir: {} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/image-renderer-network-policy.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/image-renderer-network-policy.yaml new file mode 100644 index 000000000..3730e7eba --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/image-renderer-network-policy.yaml @@ -0,0 +1,78 @@ +{{- if .Values.enabled }} +{{- if and (.Values.imageRenderer.enabled) (.Values.imageRenderer.networkPolicy.limitIngress) }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "grafana.fullname" . }}-image-renderer-ingress + namespace: {{ template "grafana.namespace" . }} + annotations: + comment: Limit image-renderer ingress traffic from grafana +spec: + podSelector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . 
| nindent 6 }} + {{- if .Values.imageRenderer.podLabels }} + {{ toYaml .Values.imageRenderer.podLabels | nindent 6 }} + {{- end }} + + policyTypes: + - Ingress + ingress: + - ports: + - port: {{ .Values.imageRenderer.service.port }} + protocol: TCP + from: + - namespaceSelector: + matchLabels: + name: {{ template "grafana.namespace" . }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 14 }} + {{- if .Values.podLabels }} + {{ toYaml .Values.podLabels | nindent 14 }} + {{- end }} +{{ end }} + +{{- if and (.Values.imageRenderer.enabled) (.Values.imageRenderer.networkPolicy.limitEgress) }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "grafana.fullname" . }}-image-renderer-egress + namespace: {{ template "grafana.namespace" . }} + annotations: + comment: Limit image-renderer egress traffic to grafana +spec: + podSelector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + {{- if .Values.imageRenderer.podLabels }} + {{ toYaml .Values.imageRenderer.podLabels | nindent 6 }} + {{- end }} + + policyTypes: + - Egress + egress: + # allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # talk only to grafana + - ports: + - port: {{ .Values.service.port }} + protocol: TCP + to: + - namespaceSelector: + matchLabels: + name: {{ template "grafana.namespace" . }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . 
| nindent 14 }} + {{- if .Values.podLabels }} + {{ toYaml .Values.podLabels | nindent 14 }} + {{- end }} +{{ end }} +{{- end}} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/image-renderer-service.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/image-renderer-service.yaml new file mode 100644 index 000000000..530931327 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/image-renderer-service.yaml @@ -0,0 +1,32 @@ +{{- if .Values.enabled }} +{{ if .Values.imageRenderer.enabled }} +{{ if .Values.imageRenderer.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "grafana.fullname" . }}-image-renderer + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} +{{- if .Values.imageRenderer.service.labels }} +{{ toYaml .Values.imageRenderer.service.labels | indent 4 }} +{{- end }} +{{- with .Values.imageRenderer.service.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + type: ClusterIP + {{- if .Values.imageRenderer.service.clusterIP }} + clusterIP: {{ .Values.imageRenderer.service.clusterIP }} + {{end}} + ports: + - name: {{ .Values.imageRenderer.service.portName }} + port: {{ .Values.imageRenderer.service.port }} + protocol: TCP + targetPort: {{ .Values.imageRenderer.service.targetPort }} + selector: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 4 }} +{{ end }} +{{ end }} +{{- end}} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/ingress.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/ingress.yaml new file mode 100644 index 000000000..80dbc798b --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/ingress.yaml @@ -0,0 +1,80 @@ +{{- if .Values.enabled }} +{{- if .Values.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "grafana.ingress.isStable" .) 
"true" -}} +{{- $ingressSupportsIngressClassName := eq (include "grafana.ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "grafana.ingress.supportsPathType" .) "true" -}} +{{- $fullName := include "grafana.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +{{- $ingressPathType := .Values.ingress.pathType -}} +{{- $extraPaths := .Values.ingress.extraPaths -}} +apiVersion: {{ include "grafana.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- if .Values.ingress.labels }} +{{ toYaml .Values.ingress.labels | indent 4 }} +{{- end }} + {{- if .Values.ingress.annotations }} + annotations: + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} + {{- end -}} +{{- if .Values.ingress.tls }} + tls: +{{ tpl (toYaml .Values.ingress.tls) $ | indent 4 }} +{{- end }} + rules: + {{- if .Values.ingress.hosts }} + {{- range .Values.ingress.hosts }} + - host: {{ tpl . 
$}} + http: + paths: +{{- if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end }} + {{- else }} + - http: + paths: + - backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- if $ingressPath }} + path: {{ $ingressPath }} + {{- end }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + {{- end -}} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/networkpolicy.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/networkpolicy.yaml new file mode 100644 index 000000000..591ac7286 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/networkpolicy.yaml @@ -0,0 +1,18 @@ +{{- if .Values.enabled }} +{{ if .Values.service.enabled}} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ template "grafana.name" . }}-network-policy + namespace: {{ template "grafana.namespace" . }} +spec: + podSelector: + matchLabels: + release: {{ .Release.Name }} + app: {{ template "grafana.name" . 
}} + ingress: + - { } + egress: + - { } +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/poddisruptionbudget.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/poddisruptionbudget.yaml new file mode 100644 index 000000000..c1ee81e61 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/poddisruptionbudget.yaml @@ -0,0 +1,24 @@ +{{- if .Values.enabled }} +{{- if .Values.podDisruptionBudget }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- if .Values.labels }} +{{ toYaml .Values.labels | indent 4 }} +{{- end }} +spec: +{{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} +{{- end }} +{{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} +{{- end }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/podsecuritypolicy.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/podsecuritypolicy.yaml new file mode 100644 index 000000000..0f4e58942 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/podsecuritypolicy.yaml @@ -0,0 +1,51 @@ +{{- if .Values.enabled }} +{{- if .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "grafana.fullname" . }} + labels: + {{- include "grafana.labels" . 
| nindent 4 }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + {{- if .Values.rbac.pspUseAppArmor }} + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + {{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + # Default set from Docker, with DAC_OVERRIDE and CHOWN + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'csi' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/pvc.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/pvc.yaml new file mode 100644 index 000000000..4389846c7 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/pvc.yaml @@ -0,0 +1,33 @@ +{{- if .Values.enabled }} +{{- if and .Values.global.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.persistence.annotations }} + annotations: +{{ toYaml . | indent 4 }} + {{- end }} + {{- with .Values.persistence.finalizers }} + finalizers: +{{ toYaml . 
| indent 4 }} + {{- end }} +spec: + accessModes: + - {{ .Values.global.persistence.accessMode }} + resources: + requests: + storage: {{ default .Values.global.persistence.size .Values.global.persistence.grafana.size | quote }} + {{- if .Values.global.persistence.storageClass }} + storageClassName: {{ .Values.global.persistence.storageClass }} + {{- end -}} + {{- with .Values.persistence.selectorLabels }} + selector: + matchLabels: +{{ toYaml . | indent 6 }} + {{- end }} +{{- end }} +{{- end}} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/role.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/role.yaml new file mode 100644 index 000000000..ab67f1d5b --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/role.yaml @@ -0,0 +1,34 @@ +{{- if .Values.enabled }} +{{- if and .Values.rbac.create (not .Values.rbac.useExistingRole) -}} +apiVersion: {{ template "grafana.rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled (or .Values.sidecar.datasources.enabled .Values.rbac.extraRoleRules))) }} +rules: +{{- if .Values.rbac.pspEnabled }} +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "grafana.fullname" . }}] +{{- end }} +{{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled) }} +- apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] +{{- end }} +{{- with .Values.rbac.extraRoleRules }} +{{ toYaml . 
| indent 0 }} +{{- end}} +{{- else }} +rules: [] +{{- end }} +{{- end }} +{{- end}} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/rolebinding.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/rolebinding.yaml new file mode 100644 index 000000000..bd0bd5dea --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/rolebinding.yaml @@ -0,0 +1,27 @@ +{{- if .Values.enabled }} +{{- if .Values.rbac.create -}} +apiVersion: {{ template "grafana.rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role +{{- if (not .Values.rbac.useExistingRole) }} + name: {{ template "grafana.fullname" . }} +{{- else }} + name: {{ .Values.rbac.useExistingRole }} +{{- end }} +subjects: +- kind: ServiceAccount + name: {{ template "grafana.serviceAccountName" . }} + namespace: {{ template "grafana.namespace" . }} +{{- end -}} +{{- end}} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/secret-env.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/secret-env.yaml new file mode 100644 index 000000000..be272234c --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/secret-env.yaml @@ -0,0 +1,16 @@ +{{- if .Values.enabled }} +{{- if .Values.envRenderSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "grafana.fullname" . }}-env + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . 
| nindent 4 }} +type: Opaque +data: +{{- range $key, $val := .Values.envRenderSecret }} + {{ $key }}: {{ $val | b64enc | quote }} +{{- end -}} +{{- end }} +{{- end}} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/secret.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/secret.yaml new file mode 100644 index 000000000..1bcd865d5 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/secret.yaml @@ -0,0 +1,28 @@ +{{- if .Values.enabled }} +{{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +type: Opaque +data: + {{- if and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }} + admin-user: {{ .Values.adminUser | b64enc | quote }} + {{- if .Values.adminPassword }} + admin-password: {{ .Values.adminPassword | b64enc | quote }} + {{- else }} + admin-password: {{ template "grafana.password" . }} + {{- end }} + {{- end }} + {{- if not .Values.ldap.existingSecret }} + ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }} + {{- end }} +{{- end }} +{{- end}} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/service.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/service.yaml new file mode 100644 index 000000000..ce3c165d5 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/service.yaml @@ -0,0 +1,59 @@ +{{- if .Values.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "grafana.fullname" . 
}} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- if .Values.service.labels }} +{{ toYaml .Values.service.labels | indent 4 }} +{{- end }} + annotations: + getambassador.io/config: | + --- + apiVersion: getambassador.io/v3alpha1 + kind: Mapping + name: grafana-server-mapping + prefix: /{{- include "k10.ingressPath" . | trimSuffix "/" }}/grafana/ + rewrite: / + service: {{ template "grafana.fullname" .}}:{{ .Values.service.port }} + timeout_ms: 15000 + hostname: "*" + +spec: +{{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} + type: ClusterIP + {{- if .Values.service.clusterIP }} + clusterIP: {{ .Values.service.clusterIP }} + {{end}} +{{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: +{{ toYaml .Values.service.loadBalancerSourceRanges | indent 4 }} + {{- end -}} +{{- else }} + type: {{ .Values.service.type }} +{{- end }} +{{- if .Values.service.externalIPs }} + externalIPs: +{{ toYaml .Values.service.externalIPs | indent 4 }} +{{- end }} + ports: + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + protocol: TCP + targetPort: {{ .Values.service.targetPort }} +{{ if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{.Values.service.nodePort}} +{{ end }} + {{- if .Values.extraExposePorts }} + {{- tpl (toYaml .Values.extraExposePorts) . | indent 4 }} + {{- end }} + selector: + {{- include "grafana.selectorLabels" . 
| nindent 4 }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/serviceaccount.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/serviceaccount.yaml new file mode 100644 index 000000000..4d178e1b5 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.enabled }} +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.serviceAccount.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} + name: {{ template "grafana.serviceAccountName" . }} + namespace: {{ template "grafana.namespace" . }} +{{- end }} +{{- end}} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/servicemonitor.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/servicemonitor.yaml new file mode 100644 index 000000000..cbe9890d8 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/servicemonitor.yaml @@ -0,0 +1,42 @@ +{{- if .Values.enabled }} +{{- if .Values.serviceMonitor.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "grafana.fullname" . }} + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ .Values.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "grafana.labels" . 
| nindent 4 }} + {{- if .Values.serviceMonitor.labels }} + {{- toYaml .Values.serviceMonitor.labels | nindent 4 }} + {{- end }} +spec: + endpoints: + - interval: {{ .Values.serviceMonitor.interval }} + {{- if .Values.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.serviceMonitor.scrapeTimeout }} + {{- end }} + honorLabels: true + port: {{ .Values.service.portName }} + path: {{ .Values.serviceMonitor.path }} + scheme: {{ .Values.serviceMonitor.scheme }} + {{- if .Values.serviceMonitor.tlsConfig }} + tlsConfig: + {{- toYaml .Values.serviceMonitor.tlsConfig | nindent 6 }} + {{- end }} + {{- if .Values.serviceMonitor.relabelings }} + relabelings: + {{- toYaml .Values.serviceMonitor.relabelings | nindent 4 }} + {{- end }} + jobLabel: "{{ .Release.Name }}" + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 8 }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} +{{- end}} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/templates/statefulset.yaml b/charts/k10/k10/4.5.1400/charts/grafana/templates/statefulset.yaml new file mode 100644 index 000000000..86f04c1a5 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/templates/statefulset.yaml @@ -0,0 +1,55 @@ +{{- if .Values.enabled }} +{{- if and .Values.global.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "statefulset")}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "grafana.fullname" . }} + namespace: {{ template "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +{{- with .Values.annotations }} + annotations: +{{ toYaml . | indent 4 }} +{{- end }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + serviceName: {{ template "grafana.fullname" . }}-headless + template: + metadata: + labels: + {{- include "grafana.selectorLabels" . 
| nindent 8 }} +{{- with .Values.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} + {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} +{{- end }} +{{- with .Values.podAnnotations }} +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- include "grafana.pod" . | nindent 6 }} + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: + - {{ .Values.global.persistence.accessMode }} + storageClassName: {{ .Values.global.persistence.storageClass }} + resources: + requests: + storage: {{ .Values.global.persistence.size }} + {{- with .Values.persistence.selectorLabels }} + selector: + matchLabels: +{{ toYaml . 
| indent 10 }} + {{- end }} +{{- end }} +{{- end}} diff --git a/charts/k10/k10/4.5.1400/charts/grafana/values.yaml b/charts/k10/k10/4.5.1400/charts/grafana/values.yaml new file mode 100644 index 000000000..75f84fd6e --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/grafana/values.yaml @@ -0,0 +1,3126 @@ +# Value to control if grafana installation +enabled: true + +# Values for prometheus datasource +prometheusName: prometheus-server +prometheusPrefixURL: /k10/prometheus + +#general purpose image for init container +ubi: + image: + repository: registry.access.redhat.com/ubi8/ubi-minimal + tag: 8.5-240.1648458092 + pullPolicy: IfNotPresent + +k10image: + registry: gcr.io + repository: kasten-images + +rbac: + create: true + ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true) + # useExistingRole: name-of-some-(cluster)role + pspEnabled: true + pspUseAppArmor: true + namespaced: false + extraRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] + extraClusterRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] +serviceAccount: + create: true + name: + nameTest: +# annotations: +# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here + autoMount: true + +replicas: 1 + +## Create HorizontalPodAutoscaler object for deployment type +# +autoscaling: + enabled: false +# minReplicas: 1 +# maxReplicas: 10 +# metrics: +# - type: Resource +# resource: +# name: cpu +# targetAverageUtilization: 60 +# - type: Resource +# resource: +# name: memory +# targetAverageUtilization: 60 + +## See `kubectl explain poddisruptionbudget.spec` for more +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} +# minAvailable: 1 +# maxUnavailable: 1 + +## See `kubectl explain deployment.spec.strategy` for more +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy +deploymentStrategy: + type: Recreate + +readinessProbe: + httpGet: + path: /api/health 
+ port: 3000 + +livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 60 + timeoutSeconds: 30 + failureThreshold: 10 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: "default-scheduler" + +image: + repository: grafana/grafana + tag: 8.1.0 + sha: "" + pullPolicy: IfNotPresent + + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + +testFramework: + enabled: false + image: "bats/bats" + tag: "v1.1.0" + imagePullPolicy: IfNotPresent + securityContext: {} + +securityContext: + runAsUser: 472 + runAsGroup: 472 + fsGroup: 472 + +containerSecurityContext: + {} + +extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /etc/grafana/ssl/ + # subPath: certificates.crt # (optional) + # configMap: certs-configmap + # readOnly: true + + +extraEmptyDirMounts: [] + # - name: provisioning-notifiers + # mountPath: /etc/grafana/provisioning/notifiers + + +# Apply extra labels to common labels. +extraLabels: {} + +## Assign a PriorityClassName to pods if set +# priorityClassName: + +downloadDashboardsImage: + repository: curlimages/curl + tag: 7.73.0 + sha: "" + pullPolicy: IfNotPresent + +downloadDashboards: + env: {} + envFromSecret: "" + resources: {} + +## Pod Annotations +# podAnnotations: {} + +## Pod Labels +# podLabels: {} + +podPortName: grafana + +## Deployment annotations +# annotations: {} + +## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. 
+## ref: http://kubernetes.io/docs/user-guide/services/ +## + +service: + enabled: true + type: ClusterIP + port: 80 + targetPort: 3000 + # targetPort: 4181 To be used with a proxy extraContainer + annotations: {} + labels: {} + portName: service + +serviceMonitor: + ## If true, a ServiceMonitor CRD is created for a prometheus operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + path: /metrics + # namespace: monitoring (defaults to use the namespace this chart is deployed to) + labels: {} + interval: 1m + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + relabelings: [] + +extraExposePorts: [] + # - name: keycloak + # port: 8080 + # targetPort: 8080 + # type: ClusterIP + +# overrides pod.spec.hostAliases in the grafana deployment's pods +hostAliases: [] + # - ip: "1.2.3.4" + # hostnames: + # - "my.host.com" + +ingress: + enabled: false + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + # Values can be templated + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: / + + # pathType is only for k8s >= 1.1= + pathType: Prefix + + hosts: + - chart-example.local + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + ## Or for k8s > 1.19 + # - path: /* + # pathType: Prefix + # backend: + # service: + # name: ssl-redirect + # port: + # name: use-annotation + + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi + +## Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +# +nodeSelector: {} + +## Tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Affinity for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +extraInitContainers: [] + +## Enable an Specify container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod +extraContainers: | +# - name: proxy +# image: quay.io/gambol99/keycloak-proxy:latest +# args: +# - -provider=github +# - -client-id= +# - -client-secret= +# - -github-org= +# - -email-domain=* +# - -cookie-secret= +# - -http-address=http://0.0.0.0:4181 +# - -upstream-url=http://127.0.0.1:3000 +# ports: +# - name: proxy-web +# containerPort: 4181 + +## Volumes that can be used in init containers that will not be mounted to deployment pods +extraContainerVolumes: [] +# - name: volume-from-secret +# secret: +# secretName: secret-to-mount +# - name: empty-dir-volume +# emptyDir: {} + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + type: pvc + enabled: true + # storageClassName: default + accessModes: + - ReadWriteOnce + size: 5Gi + # annotations: {} + finalizers: + - kubernetes.io/pvc-protection + # selectorLabels: {} + # subPath: "" + # existingClaim: + + ## If persistence is not enabled, 
this allows to mount the + ## local storage in-memory to improve performance + ## + inMemory: + enabled: false + ## The maximum usage on memory medium EmptyDir would be + ## the minimum value between the SizeLimit specified + ## here and the sum of memory limits of all containers in a pod + ## + # sizeLimit: 300Mi + +initChownData: + ## If false, data ownership will not be reset at startup + ## This allows the prometheus-server to be run with an arbitrary user + ## + enabled: true + + ## initChownData container image + ## +# image: +# repository: busybox +# tag: "1.31.1" +# sha: "" +# pullPolicy: IfNotPresent + + ## initChownData resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + + +# Administrator credentials when not using an existing secret (see below) +adminUser: admin +# adminPassword: strongpassword + +# Use an existing secret for the admin user. +admin: + existingSecret: "" + userKey: admin-user + passwordKey: admin-password + +## Define command to be executed at startup by grafana container +## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) +## Default is "run.sh" as defined in grafana's Dockerfile +# command: +# - "sh" +# - "/run.sh" + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Extra environment variables that will be pass onto deployment pods +## +## to provide grafana with access to CloudWatch on AWS EKS: +## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later) +## 2. 
edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the +## same oidc eks provider as noted before (same as the existing line) +## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name +## +## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana", +## +## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess +## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name) +## +## env: +## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here +## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token +## AWS_REGION: us-east-1 +## +## 5. uncomment the EKS section in extraSecretMounts: below +## 6. uncomment the annotation section in the serviceAccount: above +## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn + +env: {} + +## "valueFrom" environment variable references that will be added to deployment pods +## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core +## Renders in container spec as: +## env: +## ... +## - name: +## valueFrom: +## +envValueFrom: {} + +## The name of a secret in the same kubernetes namespace which contain values to be added to the environment +## This can be useful for auth tokens, etc. Value is templated. +envFromSecret: "" + +## Sensible environment variables that will be rendered as new secret object +## This can be useful for auth tokens, etc +envRenderSecret: {} + +# Inject Kubernetes services as environment variables. +# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables +enableServiceLinks: true + +## Additional grafana server secret mounts +# Defines additional mounts with secrets. 
Secrets must be manually created in the namespace. +extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # secretName: grafana-secret-files + # readOnly: true + # subPath: "" + # + # for AWS EKS (cloudwatch) use the following (see also instruction in env: above) + # - name: aws-iam-token + # mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount + # readOnly: true + # projected: + # defaultMode: 420 + # sources: + # - serviceAccountToken: + # audience: sts.amazonaws.com + # expirationSeconds: 86400 + # path: token + # + # for CSI e.g. Azure Key Vault use the following + # - name: secrets-store-inline + # mountPath: /run/secrets + # readOnly: true + # csi: + # driver: secrets-store.csi.k8s.io + # readOnly: true + # volumeAttributes: + # secretProviderClass: "akv-grafana-spc" + # nodePublishSecretRef: # Only required when using service principal mode + # name: grafana-akv-creds # Only required when using service principal mode + +## Additional grafana server volume mounts +# Defines additional volume mounts. +extraVolumeMounts: [] + # - name: extra-volume-0 + # mountPath: /mnt/volume0 + # readOnly: true + # existingClaim: volume-claim + # - name: extra-volume-1 + # mountPath: /mnt/volume1 + # readOnly: true + # hostPath: /usr/shared/ + +## Pass the plugins you want installed as a list. 
+## +plugins: [] + # - digrich-bubblechart-panel + # - grafana-clock-panel + +## Configure grafana datasources +## ref: http://docs.grafana.org/administration/provisioning/#datasources +## +#datasources: +# datasources.yaml: +# apiVersion: 1 +# datasources: +# - name: Prometheus +# type: prometheus +# url: prometheus-server-exp/k10/prometheus +# access: proxy +# isDefault: true +# - name: CloudWatch +# type: cloudwatch +# access: proxy +# uid: cloudwatch +# editable: false +# jsonData: +# authType: default +# defaultRegion: us-east-1 + +## Configure notifiers +## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels +## +notifiers: {} +# notifiers.yaml: +# notifiers: +# - name: email-notifier +# type: email +# uid: email1 +# # either: +# org_id: 1 +# # or +# org_name: Main Org. +# is_default: true +# settings: +# addresses: an_email_address@example.com +# delete_notifiers: + +## Configure grafana dashboard providers +## ref: http://docs.grafana.org/administration/provisioning/#dashboards +## +## `path` must be /var/lib/grafana/dashboards/ +## +dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: 'default' + orgId: 1 + folder: '' + type: file + disableDeletion: true + editable: false + options: + path: /var/lib/grafana/dashboards + +## Configure grafana dashboard to import +## NOTE: To use dashboards you must also enable/configure dashboardProviders +## ref: https://grafana.com/dashboards +## +## dashboards per provider, use provider name as key. 
+## +dashboards: + default: + default: + json: | + { + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 1, + "iteration": 1645712665620, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 18, + "panels": [], + "title": "Applications", + "type": "row" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "yellow", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 5, + "x": 0, + "y": 1 + }, + "id": 24, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_backup_ended_overall{cluster=\"$cluster\", state=\"succeeded\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Backups Completed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": 
null + }, + { + "color": "red", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 5, + "y": 1 + }, + "id": 33, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_backup_ended_overall{cluster=\"$cluster\", state=~\"failed|cancelled\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Backups Failed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "#EAB839", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 8, + "y": 1 + }, + "id": 34, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_backup_skipped_overall{cluster=\"$cluster\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Backups Skipped", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": 
"-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 5, + "x": 13, + "y": 1 + }, + "id": 35, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_restore_ended_overall{cluster=\"$cluster\", state=\"succeeded\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Restores Completed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 18, + "y": 1 + }, + "id": 36, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_restore_ended_overall{cluster=\"$cluster\", state=~\"failed|cancelled\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Restores Failed", + "type": "stat" + }, + { + 
"datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "#EAB839", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 21, + "y": 1 + }, + "id": 23, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_restore_skipped_overall{cluster=\"$cluster\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Restores Skipped", + "type": "stat" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 8 + }, + "id": 16, + "panels": [], + "title": "Cluster", + "type": "row" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "yellow", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 5, + "x": 0, + "y": 9 + }, + "id": 10, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": 
"sum(round(increase(action_backup_cluster_ended_overall{cluster=\"$cluster\", state=\"succeeded\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Cluster Backups Completed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 5, + "y": 9 + }, + "id": 19, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_backup_cluster_ended_overall{cluster=\"$cluster\", state=~\"failed|cancelled\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Cluster Backups Failed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "#EAB839", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 8, + "y": 9 + }, + "id": 28, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + 
"reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_backup_cluster_skipped_overall{cluster=\"$cluster\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Cluster Backups Skipped", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 5, + "x": 13, + "y": 9 + }, + "id": 21, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_restore_cluster_ended_overall{cluster=\"$cluster\", state=\"succeeded\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Cluster Restores Completed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, 
+ "x": 18, + "y": 9 + }, + "id": 22, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_restore_cluster_ended_overall{cluster=\"$cluster\", state=~\"failed|cancelled\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Cluster Restores Failed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "#EAB839", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 21, + "y": 9 + }, + "id": 25, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_restore_cluster_skipped_overall{cluster=\"$cluster\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Cluster Restores Skipped", + "type": "stat" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 31, + "panels": [], + "title": "Backup Exports", + "type": "row" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + 
"color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 0, + "y": 17 + }, + "id": 38, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_export_ended_overall{cluster=\"$cluster\", state=\"succeeded\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Exports Completed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 5, + "y": 17 + }, + "id": 29, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_export_ended_overall{cluster=\"$cluster\", state=~\"failed|cancelled\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Exports Failed", + "type": "stat" + 
}, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "#EAB839", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 8, + "y": 17 + }, + "id": 20, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_export_skipped_overall{cluster=\"$cluster\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Exports Skipped", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 5, + "x": 13, + "y": 17 + }, + "id": 27, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_import_ended_overall{cluster=\"$cluster\", state=\"succeeded\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": 
null, + "title": "Imports Completed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 18, + "y": 17 + }, + "id": 39, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_import_ended_overall{cluster=\"$cluster\", state=~\"failed|cancelled\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Imports Failed", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "#EAB839", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 21, + "y": 17 + }, + "id": 37, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": 
"sum(round(increase(action_import_skipped_overall{cluster=\"$cluster\"}[$__range])))", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Imports Skipped", + "type": "stat" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 23 + }, + "id": 14, + "panels": [], + "title": "System", + "type": "row" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + }, + "unit": "runs" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 0, + "y": 24 + }, + "id": 12, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_run_ended_overall{cluster=\"$cluster\", state=\"succeeded\"}[$__range])))", + "format": "time_series", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "timeFrom": null, + "title": "Policy Runs", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "-" + } + }, + "type": "value" + } + ], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "yellow", + "value": 1 + } + ] + }, + "unit": "runs" + }, + "overrides": [] + }, + 
"gridPos": { + "h": 6, + "w": 3, + "x": 3, + "y": 24 + }, + "id": 40, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "sum(round(increase(action_run_skipped_overall{cluster=\"$cluster\"}[$__range])))", + "format": "time_series", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "timeFrom": null, + "title": "Policy Runs Skipped", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#ccccdc", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 6, + "y": 24 + }, + "id": 6, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": true, + "expr": "catalog_persistent_volume_disk_space_used_bytes{cluster=\"$cluster\"}", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Catalog Volume Used", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 100, + "min": 0, + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 70 + }, + { + "color": "orange", + "value": 80 + 
}, + { + "color": "red", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 9, + "y": 24 + }, + "id": 2, + "options": { + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": true, + "expr": "100-catalog_persistent_volume_free_space_percent{cluster=\"$cluster\"}", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Catalog Volume Used Space", + "type": "gauge" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#ccccdc", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 12, + "y": 24 + }, + "id": 8, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": true, + "expr": "jobs_persistent_volume_disk_space_used_bytes{cluster=\"$cluster\"}", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Jobs Volume Used", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 100, + "min": 0, + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 70 + }, + { + "color": "orange", + "value": 80 + }, + { + "color": 
"red", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 15, + "y": 24 + }, + "id": 4, + "options": { + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": true, + "expr": "100-jobs_persistent_volume_free_space_percent{cluster=\"$cluster\"}", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Jobs Volume Used Space", + "type": "gauge" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#ccccdc", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 18, + "y": 24 + }, + "id": 7, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": true, + "expr": "logging_persistent_volume_disk_space_used_bytes{cluster=\"$cluster\"}", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Logging Volume Used", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 100, + "min": 0, + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 70 + }, + { + "color": "orange", + "value": 80 + }, + { + "color": "red", + "value": 90 
+ } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 21, + "y": 24 + }, + "id": 3, + "options": { + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": true, + "expr": "100-logging_persistent_volume_free_space_percent{cluster=\"$cluster\"}", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Logging Volume Used Space", + "type": "gauge" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "text", + "value": null + }, + { + "color": "green", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 0, + "y": 30 + }, + "id": 41, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "compliance_count{state=\"Compliant\"}", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Compliant Applications", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 3, + "y": 30 + }, + "id": 42, + "interval": "1m", + "options": { + 
"colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "compliance_count{state=\"NotCompliant\"}", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Non-Compliant Applications", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 6, + "y": 30 + }, + "id": 43, + "interval": "1m", + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": false, + "expr": "compliance_count{state=\"Unmanaged\"}", + "hide": false, + "interval": "", + "legendFormat": "", + "refId": "B" + } + ], + "timeFrom": null, + "title": "Unmanaged Applications", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#ccccdc", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 12, + "y": 30 + }, + "id": 44, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + 
"fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": true, + "expr": "snapshot_storage_size_bytes{cluster=\"$cluster\", type=\"physical\"}", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Snapshot Size (Physical)", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#ccccdc", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 15, + "y": 30 + }, + "id": 45, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": true, + "expr": "snapshot_storage_size_bytes{cluster=\"$cluster\", type=\"logical\"}", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Snapshot Size (Logical)", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#ccccdc", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 18, + "y": 30 + }, + "id": 46, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + 
"targets": [ + { + "exemplar": true, + "expr": "export_storage_size_bytes{cluster=\"$cluster\", type=\"physical\"}", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Export Size (Physical)", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "-", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "#ccccdc", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 3, + "x": 21, + "y": 30 + }, + "id": 47, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.1.8", + "targets": [ + { + "exemplar": true, + "expr": "export_storage_size_bytes{cluster=\"$cluster\", type=\"logical\"}", + "interval": "", + "legendFormat": "", + "queryType": "randomWalk", + "refId": "A" + } + ], + "title": "Export Size (Logical)", + "type": "stat" + } + ], + "schemaVersion": 30, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "description": null, + "error": null, + "hide": 2, + "label": "Cluster", + "name": "cluster", + "query": "", + "skipUrlSync": false, + "type": "constant" + } + ] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "K10 Dashboard", + "uid": "8Ebb3xS7k", + "version": 1 + } + + # custom-dashboard: + # file: dashboards/custom-dashboard.json + # prometheus-stats: + # gnetId: 2 + # revision: 2 + # datasource: Prometheus + # local-dashboard: + # url: https://example.com/repository/test.json + # token: '' + # local-dashboard-base64: + # url: https://example.com/repository/test-b64.json + # token: '' + # b64content: true + +## Reference to 
external ConfigMap per provider. Use provider name as key and ConfigMap name as value. +## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. +## ConfigMap data example: +## +## data: +## example-dashboard.json: | +## RAW_JSON +## +dashboardsConfigMaps: {} +# default: "" + +## Grafana's primary configuration +## NOTE: values in map will be converted to ini format +## ref: http://docs.grafana.org/installation/configuration/ +## +grafana.ini: + paths: + data: /var/lib/grafana/ + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: true + log: + mode: console + grafana_net: + url: https://grafana.net + dashboards: + default_home_dashboard_path: /var/lib/grafana/dashboards/default/default.json +## grafana Authentication can be enabled with the following values on grafana.ini +# server: +# # The full public facing url you use in browser, used for redirects and emails +## domain: +# root_url: /k10/grafana +# serve_from_sub_path: true + + auth: + disable_login_form: true + disable_signout_menu: true + + auth.basic: + enabled: false + + auth.anonymous: + enabled: true + org_name: Main Org. 
+ org_role: Admin + # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana + # auth.github: + # enabled: false + # allow_sign_up: false + # scopes: user:email,read:org + # auth_url: https://github.com/login/oauth/authorize + # token_url: https://github.com/login/oauth/access_token + # api_url: https://api.github.com/user + # team_ids: + # allowed_organizations: + # client_id: + # client_secret: +## LDAP Authentication can be enabled with the following values on grafana.ini +## NOTE: Grafana will fail to start if the value for ldap.toml is invalid + # auth.ldap: + # enabled: true + # allow_sign_up: true + # config_file: /etc/grafana/ldap.toml + +## Grafana's LDAP configuration +## Templated by the template in _helpers.tpl +## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled +## ref: http://docs.grafana.org/installation/configuration/#auth-ldap +## ref: http://docs.grafana.org/installation/ldap/#configuration +ldap: + enabled: false + # `existingSecret` is a reference to an existing secret containing the ldap configuration + # for Grafana in a key `ldap-toml`. + existingSecret: "" + # `config` is the content of `ldap.toml` that will be stored in the created secret + config: "" + # config: |- + # verbose_logging = true + + # [[servers]] + # host = "my-ldap-server" + # port = 636 + # use_ssl = true + # start_tls = false + # ssl_skip_verify = false + # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" + +## Grafana's SMTP configuration +## NOTE: To enable, grafana.ini must be configured with smtp.enabled +## ref: http://docs.grafana.org/installation/configuration/#smtp +smtp: + # `existingSecret` is a reference to an existing secret containing the smtp configuration + # for Grafana. 
+ existingSecret: "" + userKey: "user" + passwordKey: "password" + +## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders +## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards +sidecar: + image: + repository: quay.io/kiwigrid/k8s-sidecar + tag: 1.12.2 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} +# limits: +# cpu: 100m +# memory: 100Mi +# requests: +# cpu: 50m +# memory: 50Mi + # skipTlsVerify Set to true to skip tls verification for kube api calls + # skipTlsVerify: true + enableUniqueFilenames: false + dashboards: + enabled: false + SCProvider: true + # label that the configmaps with dashboards are marked with + label: grafana_dashboard + # value of label that the configmaps with dashboards are set to + labelValue: null + # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set) + folder: /tmp/dashboards + # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead + defaultFolderName: null + # If specified, the sidecar will search for dashboard config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # search in configmap, secret or both + resource: both + # If specified, the sidecar will look for annotation with this name to create folder and put graph here. + # You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. 
+ folderAnnotation: null + # provider configuration that lets grafana manage the dashboards + provider: + # name of the provider, should be unique + name: sidecarProvider + # orgid as configured in grafana + orgid: 1 + # folder in which the dashboards should be imported in grafana + folder: '' + # type of the provider + type: file + # disableDelete to activate a import-only behaviour + disableDelete: false + # allow updating provisioned dashboards from the UI + allowUiUpdates: false + # allow Grafana to replicate dashboard structure from filesystem + foldersFromFilesStructure: false + datasources: + enabled: false + # label that the configmaps with datasources are marked with + label: grafana_datasource + # value of label that the configmaps with datasources are set to + labelValue: null + # If specified, the sidecar will search for datasource config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # search in configmap, secret or both + resource: both + notifiers: + enabled: false + # label that the configmaps with notifiers are marked with + label: grafana_notifier + # If specified, the sidecar will search for notifier config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. 
+ # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # search in configmap, secret or both + resource: both + +## Override the deployment namespace +## +namespaceOverride: "" + +## Number of old ReplicaSets to retain +## +revisionHistoryLimit: 10 + +## Add a seperate remote image renderer deployment/service +imageRenderer: + # Enable the image-renderer deployment & service + enabled: false + replicas: 1 + image: + # image-renderer Image repository + repository: grafana/grafana-image-renderer + # image-renderer Image tag + tag: latest + # image-renderer Image sha (optional) + sha: "" + # image-renderer ImagePullPolicy + pullPolicy: Always + # extra environment variables + env: + HTTP_HOST: "0.0.0.0" + # RENDERING_ARGS: --disable-gpu,--window-size=1280x758 + # RENDERING_MODE: clustered + # image-renderer deployment serviceAccount + serviceAccountName: "" + # image-renderer deployment securityContext + securityContext: {} + # image-renderer deployment Host Aliases + hostAliases: [] + # image-renderer deployment priority class + priorityClassName: '' + service: + # Enable the image-renderer service + enabled: true + # image-renderer service port name + portName: 'http' + # image-renderer service port used by both service and deployment + port: 8081 + targetPort: 8081 + # In case a sub_path is used this needs to be added to the image renderer callback + grafanaSubPath: "" + # name of the image-renderer port on the pod + podPortName: http + # number of image-renderer replica sets to keep + revisionHistoryLimit: 10 + networkPolicy: + # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods + limitIngress: true + # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods + limitEgress: false + resources: {} +# limits: +# cpu: 100m +# memory: 100Mi +# requests: +# cpu: 50m +# memory: 50Mi diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/Chart.yaml 
b/charts/k10/k10/4.5.1400/charts/prometheus/Chart.yaml new file mode 100644 index 000000000..3aa2d8141 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/Chart.yaml @@ -0,0 +1,30 @@ +apiVersion: v2 +appVersion: 2.26.0 +dependencies: +- condition: kubeStateMetrics.enabled + name: kube-state-metrics + repository: https://prometheus-community.github.io/helm-charts + version: 3.4.* +description: Prometheus is a monitoring system and time series database. +home: https://prometheus.io/ +icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png +maintainers: +- email: gianrubio@gmail.com + name: gianrubio +- email: zanhsieh@gmail.com + name: zanhsieh +- email: miroslav.hadzhiev@gmail.com + name: Xtigyro +- email: monotek23@gmail.com + name: monotek +- email: naseem@transit.app + name: naseemkullah +name: prometheus +sources: +- https://github.com/prometheus/alertmanager +- https://github.com/prometheus/prometheus +- https://github.com/prometheus/pushgateway +- https://github.com/prometheus/node_exporter +- https://github.com/kubernetes/kube-state-metrics +type: application +version: 14.6.0 diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/README.md b/charts/k10/k10/4.5.1400/charts/prometheus/README.md new file mode 100644 index 000000000..25f27f3f6 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/README.md @@ -0,0 +1,224 @@ +# Prometheus + +[Prometheus](https://prometheus.io/), a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. It collects metrics from configured targets at given intervals, evaluates rule expressions, displays the results, and can trigger alerts if some condition is observed to be true. + +This chart bootstraps a [Prometheus](https://prometheus.io/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 
+ +## Prerequisites + +- Kubernetes 1.16+ +- Helm 3+ + +## Get Repo Info + +```console +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm repo add kube-state-metrics https://kubernetes.github.io/kube-state-metrics +helm repo update +``` + +_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Install Chart + +```console +# Helm +$ helm install [RELEASE_NAME] prometheus-community/prometheus +``` + +_See [configuration](#configuration) below._ + +_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ + +## Dependencies + +By default this chart installs additional, dependent charts: + +- [stable/kube-state-metrics](https://github.com/helm/charts/tree/master/stable/kube-state-metrics) + +To disable the dependency during installation, set `kubeStateMetrics.enabled` to `false`. + +_See [helm dependency](https://helm.sh/docs/helm/helm_dependency/) for command documentation._ + +## Uninstall Chart + +```console +# Helm +$ helm uninstall [RELEASE_NAME] +``` + +This removes all the Kubernetes components associated with the chart and deletes the release. + +_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ + +## Upgrading Chart + +```console +# Helm +$ helm upgrade [RELEASE_NAME] [CHART] --install +``` + +_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ + +### To 9.0 + +Version 9.0 adds a new option to enable or disable the Prometheus Server. This supports the use case of running a Prometheus server in one k8s cluster and scraping exporters in another cluster while using the same chart for each deployment. To install the server `server.enabled` must be set to `true`. + +### To 5.0 + +As of version 5.0, this chart uses Prometheus 2.x. This version of prometheus introduces a new data format and is not compatible with prometheus 1.x. 
It is recommended to install this as a new release, as updating existing releases will not work. See the [prometheus docs](https://prometheus.io/docs/prometheus/latest/migration/#storage) for instructions on retaining your old data. + +Prometheus version 2.x has made changes to alertmanager, storage and recording rules. Check out the migration guide [here](https://prometheus.io/docs/prometheus/2.0/migration/). + +Users of this chart will need to update their alerting rules to the new format before they can upgrade. + +### Example Migration + +Assuming you have an existing release of the prometheus chart, named `prometheus-old`. In order to update to prometheus 2.x while keeping your old data do the following: + +1. Update the `prometheus-old` release. Disable scraping on every component besides the prometheus server, similar to the configuration below: + + ```yaml + alertmanager: + enabled: false + alertmanagerFiles: + alertmanager.yml: "" + kubeStateMetrics: + enabled: false + nodeExporter: + enabled: false + pushgateway: + enabled: false + server: + extraArgs: + storage.local.retention: 720h + serverFiles: + alerts: "" + prometheus.yml: "" + rules: "" + ``` + +1. Deploy a new release of the chart with version 5.0+ using prometheus 2.x. In the values.yaml set the scrape config as usual, and also add the `prometheus-old` instance as a remote-read target. + + ```yaml + prometheus.yml: + ... + remote_read: + - url: http://prometheus-old/api/v1/read + ... + ``` + + Old data will be available when you query the new prometheus instance. + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). 
To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +# Helm 2 +$ helm inspect values prometheus-community/prometheus + +# Helm 3 +$ helm show values prometheus-community/prometheus +``` + +You may similarly use the above configuration commands on each chart [dependency](#dependencies) to see it's configurations. + +### Scraping Pod Metrics via Annotations + +This chart uses a default configuration that causes prometheus to scrape a variety of kubernetes resource types, provided they have the correct annotations. In this section we describe how to configure pods to be scraped; for information on how other resource types can be scraped you can do a `helm template` to get the kubernetes resource definitions, and then reference the prometheus configuration in the ConfigMap against the prometheus documentation for [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) and [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config). + +In order to get prometheus to scrape pods, you must add annotations to the the pods as below: + +```yaml +metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/path: /metrics + prometheus.io/port: "8080" +``` + +You should adjust `prometheus.io/path` based on the URL that your pod serves metrics from. `prometheus.io/port` should be set to the port that your pod serves metrics from. Note that the values for `prometheus.io/scrape` and `prometheus.io/port` must be enclosed in double quotes. + +### Sharing Alerts Between Services + +Note that when [installing](#install-chart) or [upgrading](#upgrading-chart) you may use multiple values override files. This is particularly useful when you have alerts belonging to multiple services in the cluster. For example, + +```yaml +# values.yaml +# ... 
+ +# service1-alert.yaml +serverFiles: + alerts: + service1: + - alert: anAlert + # ... + +# service2-alert.yaml +serverFiles: + alerts: + service2: + - alert: anAlert + # ... +``` + +```console +helm install [RELEASE_NAME] prometheus-community/prometheus -f values.yaml -f service1-alert.yaml -f service2-alert.yaml +``` + +### RBAC Configuration + +Roles and RoleBindings resources will be created automatically for `server` service. + +To manually setup RBAC you need to set the parameter `rbac.create=false` and specify the service account to be used for each service by setting the parameters: `serviceAccounts.{{ component }}.create` to `false` and `serviceAccounts.{{ component }}.name` to the name of a pre-existing service account. + +> **Tip**: You can refer to the default `*-clusterrole.yaml` and `*-clusterrolebinding.yaml` files in [templates](templates/) to customize your own. + +### ConfigMap Files + +AlertManager is configured through [alertmanager.yml](https://prometheus.io/docs/alerting/configuration/). This file (and any others listed in `alertmanagerFiles`) will be mounted into the `alertmanager` pod. + +Prometheus is configured through [prometheus.yml](https://prometheus.io/docs/operating/configuration/). This file (and any others listed in `serverFiles`) will be mounted into the `server` pod. + +### Ingress TLS + +If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [cert-manager](https://github.com/jetstack/cert-manager)), please refer to the documentation for that mechanism. + +To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. 
Then create a TLS secret in the namespace: + +```console +kubectl create secret tls prometheus-server-tls --cert=path/to/tls.cert --key=path/to/tls.key +``` + +Include the secret's name, along with the desired hostnames, in the alertmanager/server Ingress TLS section of your custom `values.yaml` file: + +```yaml +server: + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: true + + ## Prometheus server Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - prometheus.domain.com + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: + - secretName: prometheus-server-tls + hosts: + - prometheus.domain.com +``` + +### NetworkPolicy + +Enabling Network Policy for Prometheus will secure connections to Alert Manager and Kube State Metrics by only accepting connections from Prometheus Server. All inbound connections to Prometheus Server are still allowed. + +To enable network policy for Prometheus, install a networking plugin that implements the Kubernetes NetworkPolicy spec, and set `networkPolicy.enabled` to true. + +If NetworkPolicy is enabled for Prometheus' scrape targets, you may also need to manually create a networkpolicy which allows it. diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/NOTES.txt b/charts/k10/k10/4.5.1400/charts/prometheus/templates/NOTES.txt new file mode 100644 index 000000000..0e8868f0b --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/NOTES.txt @@ -0,0 +1,112 @@ +{{- if .Values.server.enabled -}} +The Prometheus server can be accessed via port {{ .Values.server.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.server.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.server.ingress.enabled -}} +From outside the cluster, the server URL(s) are: +{{- range .Values.server.ingress.hosts }} +http://{{ . 
}} +{{- end }} +{{- else }} +Get the Prometheus server URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.server.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.server.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.server.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.server.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.server.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.server.service.servicePort }} +{{- else if contains "ClusterIP" .Values.server.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.server.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9090 +{{- end }} +{{- end }} + +{{- if .Values.server.persistentVolume.enabled }} +{{- else }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the Server pod is terminated. 
##### +################################################################################# +{{- end }} +{{- end }} + +{{ if .Values.alertmanager.enabled }} +The Prometheus alertmanager can be accessed via port {{ .Values.alertmanager.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.alertmanager.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.alertmanager.ingress.enabled -}} +From outside the cluster, the alertmanager URL(s) are: +{{- range .Values.alertmanager.ingress.hosts }} +http://{{ . }} +{{- end }} +{{- else }} +Get the Alertmanager URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.alertmanager.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.alertmanager.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.alertmanager.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.alertmanager.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.alertmanager.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.alertmanager.service.servicePort }} +{{- else if contains "ClusterIP" .Values.alertmanager.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . 
}},component={{ .Values.alertmanager.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9093 +{{- end }} +{{- end }} + +{{- if .Values.alertmanager.persistentVolume.enabled }} +{{- else }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the AlertManager pod is terminated. ##### +################################################################################# +{{- end }} +{{- end }} + +{{- if .Values.nodeExporter.podSecurityPolicy.enabled }} +{{- else }} +################################################################################# +###### WARNING: Pod Security Policy has been moved to a global property. ##### +###### use .Values.podSecurityPolicy.enabled with pod-based ##### +###### annotations ##### +###### (e.g. .Values.nodeExporter.podSecurityPolicy.annotations) ##### +################################################################################# +{{- end }} + +{{ if .Values.pushgateway.enabled }} +The Prometheus PushGateway can be accessed via port {{ .Values.pushgateway.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.pushgateway.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local + +{{ if .Values.pushgateway.ingress.enabled -}} +From outside the cluster, the pushgateway URL(s) are: +{{- range .Values.pushgateway.ingress.hosts }} +http://{{ . }} +{{- end }} +{{- else }} +Get the PushGateway URL by running these commands in the same shell: +{{- if contains "NodePort" .Values.pushgateway.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.pushgateway.fullname" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.pushgateway.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.pushgateway.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.pushgateway.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ .Values.pushgateway.service.servicePort }} +{{- else if contains "ClusterIP" .Values.pushgateway.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "prometheus.name" . }},component={{ .Values.pushgateway.name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9091 +{{- end }} +{{- end }} +{{- end }} + +For more information on running Prometheus, visit: +https://prometheus.io/ diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/_definitions.tpl b/charts/k10/k10/4.5.1400/charts/prometheus/templates/_definitions.tpl new file mode 100644 index 000000000..d93364c7f --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/_definitions.tpl @@ -0,0 +1,3 @@ +{{/* Autogenerated, do NOT modify */}} +{{- define "k10.prometheusImageTag" -}}v2.26.0{{- end -}} +{{- define "k10.prometheusConfigMapReloaderImageTag" -}}v0.5.0{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/_helpers.tpl b/charts/k10/k10/4.5.1400/charts/prometheus/templates/_helpers.tpl new file mode 100644 index 000000000..287ed192a --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/_helpers.tpl @@ -0,0 +1,400 @@ +{{/* vim: set filetype=mustache: */}} +{{/* 
+Expand the name of the chart. +*/}} +{{- define "prometheus.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "prometheus.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create unified labels for prometheus components +*/}} +{{- define "prometheus.common.matchLabels" -}} +app: {{ template "prometheus.name" . }} +release: {{ .Release.Name }} +{{- end -}} + +{{- define "prometheus.common.metaLabels" -}} +chart: {{ template "prometheus.chart" . }} +heritage: {{ .Release.Service }} +{{- end -}} + +{{- define "prometheus.alertmanager.labels" -}} +{{ include "prometheus.alertmanager.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.alertmanager.matchLabels" -}} +component: {{ .Values.alertmanager.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.nodeExporter.labels" -}} +{{ include "prometheus.nodeExporter.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.nodeExporter.matchLabels" -}} +component: {{ .Values.nodeExporter.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.pushgateway.labels" -}} +{{ include "prometheus.pushgateway.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.pushgateway.matchLabels" -}} +component: {{ .Values.pushgateway.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{- define "prometheus.server.labels" -}} +{{ include "prometheus.server.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . 
}} +{{- end -}} + +{{- define "prometheus.server.matchLabels" -}} +component: {{ .Values.server.name | quote }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Figure out the config based on +the value of airgapped.repository +*/}} +{{- define "get.cmreloadimage" }} +{{- if not .Values.global.rhMarketPlace }} +{{- if .Values.global.airgapped.repository }} +{{- printf "%s/configmap-reload:%s" .Values.global.airgapped.repository (include "get.cmReloadImageTag" .) }} +{{- else }} +{{- printf "%s:%s" (include "get.cmReloadImageRepo" .) (include "get.cmReloadImageTag" .) }} +{{- end }} +{{- else }} +{{- printf "%s" (get .Values.global.images "configmap-reload") }} +{{- end -}} +{{- end }} + +{{/* +Figure out the config based on +the value of airgapped.repository +*/}} +{{- define "get.serverimage" }} +{{- if not .Values.global.rhMarketPlace }} +{{- if .Values.global.airgapped.repository }} +{{- printf "%s/prometheus:%s" .Values.global.airgapped.repository (include "get.promImageTag" .) }} +{{- else }} +{{- printf "%s:%s" (include "get.promImageRepo" .) (include "get.promImageTag" .) 
}} +{{- end }} +{{- else }} +{{- printf "%s" (get .Values.global.images "prometheus") }} +{{- end -}} +{{- end }} + + +{{/* +Figure out the configmap-reload image tag +based on the value of global.upstreamCertifiedImages +*/}} +{{- define "get.cmReloadImageTag"}} +{{- if .Values.global.upstreamCertifiedImages }} +{{- if .Values.global.airgapped.repository }} +{{- printf "k10-%s-rh-ubi" (include "k10.prometheusConfigMapReloaderImageTag" .) }} +{{- else }} +{{- printf "%s-rh-ubi" (include "k10.prometheusConfigMapReloaderImageTag" .) }} +{{- end }} +{{- else }} +{{- if .Values.global.airgapped.repository }} +{{- printf "k10-%s" (include "k10.prometheusConfigMapReloaderImageTag" .) }} +{{- else }} +{{- printf "%s" (include "k10.prometheusConfigMapReloaderImageTag" .) }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Figure out the prometheus image tag +based on the value of global.upstreamCertifiedImages +*/}} +{{- define "get.promImageTag"}} +{{- if .Values.global.upstreamCertifiedImages }} +{{- if .Values.global.airgapped.repository }} +{{- printf "k10-%s-rh-ubi" (include "k10.prometheusImageTag" .) }} +{{- else }} +{{- printf "%s-rh-ubi" (include "k10.prometheusImageTag" .) }} +{{- end }} +{{- else }} +{{- if .Values.global.airgapped.repository }} +{{- printf "k10-%s" (include "k10.prometheusImageTag" .) }} +{{- else }} +{{- printf "%s" (include "k10.prometheusImageTag" .) 
}} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Figure out the configmap-reload image repo +based on the value of global.upstreamCertifiedImages +*/}} +{{- define "get.cmReloadImageRepo" }} +{{- if .Values.global.upstreamCertifiedImages }} +{{- printf "%s/%s/configmap-reload" .Values.k10image.registry .Values.k10image.repository }} +{{- else }} +{{- print .Values.configmapReload.prometheus.image.repository }} +{{- end }} +{{- end }} + +{{/* +Figure out the prom image repo +based on the value of global.upstreamCertifiedImages +*/}} +{{- define "get.promImageRepo" }} +{{- if .Values.global.upstreamCertifiedImages }} +{{- printf "%s/%s/prometheus" .Values.k10image.registry .Values.k10image.repository }} +{{- else }} +{{- print .Values.server.image.repository }} +{{- end }} +{{- end }} + +{{/* +Create a fully qualified alertmanager name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} + +{{- define "prometheus.alertmanager.fullname" -}} +{{- if .Values.alertmanager.fullnameOverride -}} +{{- .Values.alertmanager.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.alertmanager.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified node-exporter name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "prometheus.nodeExporter.fullname" -}} +{{- if .Values.nodeExporter.fullnameOverride -}} +{{- .Values.nodeExporter.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.nodeExporter.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified Prometheus server name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.server.fullname" -}} +{{- if .Values.server.fullnameOverride -}} +{{- .Values.server.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified Prometheus server clusterrole name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "prometheus.server.clusterrolefullname" -}} +{{- if .Values.server.clusterRoleNameOverride -}} +{{- .Values.server.clusterRoleNameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- if .Values.server.fullnameOverride -}} +{{- printf "%s-%s" .Release.Name .Values.server.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified pushgateway name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.pushgateway.fullname" -}} +{{- if .Values.pushgateway.fullnameOverride -}} +{{- .Values.pushgateway.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.pushgateway.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Get KubeVersion removing pre-release information. +*/}} +{{- define "prometheus.kubeVersion" -}} + {{- default .Capabilities.KubeVersion.Version (regexFind "v[0-9]+\\.[0-9]+\\.[0-9]+" .Capabilities.KubeVersion.Version) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "prometheus.deployment.apiVersion" -}} +{{- print "apps/v1" -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for daemonset. 
+*/}} +{{- define "prometheus.daemonset.apiVersion" -}} +{{- print "apps/v1" -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "prometheus.networkPolicy.apiVersion" -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for podsecuritypolicy. +*/}} +{{- define "prometheus.podSecurityPolicy.apiVersion" -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for rbac. +*/}} +{{- define "rbac.apiVersion" -}} +{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "ingress.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19.x" (include "prometheus.kubeVersion" .)) -}} + {{- print "networking.k8s.io/v1" -}} + {{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}} + {{- print "networking.k8s.io/v1beta1" -}} + {{- else -}} + {{- print "extensions/v1beta1" -}} + {{- end -}} +{{- end -}} + +{{/* +Return if ingress is stable. +*/}} +{{- define "ingress.isStable" -}} + {{- eq (include "ingress.apiVersion" .) "networking.k8s.io/v1" -}} +{{- end -}} + +{{/* +Return if ingress supports ingressClassName. +*/}} +{{- define "ingress.supportsIngressClassName" -}} + {{- or (eq (include "ingress.isStable" .) "true") (and (eq (include "ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "prometheus.kubeVersion" .))) -}} +{{- end -}} +{{/* +Return if ingress supports pathType. +*/}} +{{- define "ingress.supportsPathType" -}} + {{- or (eq (include "ingress.isStable" .) "true") (and (eq (include "ingress.apiVersion" .) 
"networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "prometheus.kubeVersion" .))) -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the alertmanager component +*/}} +{{- define "prometheus.serviceAccountName.alertmanager" -}} +{{- if .Values.serviceAccounts.alertmanager.create -}} + {{ default (include "prometheus.alertmanager.fullname" .) .Values.serviceAccounts.alertmanager.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.alertmanager.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the nodeExporter component +*/}} +{{- define "prometheus.serviceAccountName.nodeExporter" -}} +{{- if .Values.serviceAccounts.nodeExporter.create -}} + {{ default (include "prometheus.nodeExporter.fullname" .) .Values.serviceAccounts.nodeExporter.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.nodeExporter.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the pushgateway component +*/}} +{{- define "prometheus.serviceAccountName.pushgateway" -}} +{{- if .Values.serviceAccounts.pushgateway.create -}} + {{ default (include "prometheus.pushgateway.fullname" .) .Values.serviceAccounts.pushgateway.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.pushgateway.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the server component +*/}} +{{- define "prometheus.serviceAccountName.server" -}} +{{- if .Values.serviceAccounts.server.create -}} + {{ default (include "prometheus.server.fullname" .) 
.Values.serviceAccounts.server.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.server.name }} +{{- end -}} +{{- end -}} + +{{/* +Define the prometheus.namespace template if set with forceNamespace or .Release.Namespace is set +*/}} +{{- define "prometheus.namespace" -}} +{{- if .Values.forceNamespace -}} +{{ printf "namespace: %s" .Values.forceNamespace }} +{{- else -}} +{{ printf "namespace: %s" .Release.Namespace }} +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/clusterrole.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/clusterrole.yaml new file mode 100644 index 000000000..c732ff4e5 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/clusterrole.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create .Values.alertmanager.useClusterRole (not .Values.alertmanager.useExistingRole) -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +rules: +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.alertmanager.fullname" . }} +{{- else }} + [] +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/clusterrolebinding.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/clusterrolebinding.yaml new file mode 100644 index 000000000..6f13e98b5 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/clusterrolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create .Values.alertmanager.useClusterRole -}} +apiVersion: {{ template "rbac.apiVersion" . 
}} +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.alertmanager" . }} +{{ include "prometheus.namespace" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole +{{- if (not .Values.alertmanager.useExistingRole) }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{- else }} + name: {{ .Values.alertmanager.useExistingRole }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/cm.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/cm.yaml new file mode 100644 index 000000000..cb09bf067 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/cm.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.alertmanager.enabled (and (empty .Values.alertmanager.configMapOverrideName) (empty .Values.alertmanager.configFromSecret)) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +data: +{{- $root := . 
-}} +{{- range $key, $value := .Values.alertmanagerFiles }} + {{- if $key | regexMatch ".*\\.ya?ml$" }} + {{ $key }}: | +{{ toYaml $value | default "{}" | indent 4 }} + {{- else }} + {{ $key }}: {{ toYaml $value | indent 4 }} + {{- end }} +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/deploy.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/deploy.yaml new file mode 100644 index 000000000..fe6e9b9ac --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/deploy.yaml @@ -0,0 +1,161 @@ +{{- if and .Values.alertmanager.enabled (not .Values.alertmanager.statefulSet.enabled) -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . }} +kind: Deployment +metadata: +{{- if .Values.alertmanager.deploymentAnnotations }} + annotations: + {{ toYaml .Values.alertmanager.deploymentAnnotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + selector: + matchLabels: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} + replicas: {{ .Values.alertmanager.replicaCount }} + {{- if .Values.alertmanager.strategy }} + strategy: +{{ toYaml .Values.alertmanager.strategy | trim | indent 4 }} + {{ if eq .Values.alertmanager.strategy.type "Recreate" }}rollingUpdate: null{{ end }} +{{- end }} + template: + metadata: + {{- if .Values.alertmanager.podAnnotations }} + annotations: + {{ toYaml .Values.alertmanager.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . 
| nindent 8 }} + {{- if .Values.alertmanager.podLabels}} + {{ toYaml .Values.alertmanager.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.alertmanager.schedulerName }} + schedulerName: "{{ .Values.alertmanager.schedulerName }}" +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }} + {{- if .Values.alertmanager.extraInitContainers }} + initContainers: +{{ toYaml .Values.alertmanager.extraInitContainers | indent 8 }} + {{- end }} +{{- if .Values.alertmanager.priorityClassName }} + priorityClassName: "{{ .Values.alertmanager.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }} + image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}" + imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}" + env: + {{- range $key, $value := .Values.alertmanager.extraEnv }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + args: + - --config.file=/etc/config/{{ .Values.alertmanager.configFileName }} + - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }} + - --cluster.advertise-address=[$(POD_IP)]:6783 + {{- range $key, $value := .Values.alertmanager.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.alertmanager.baseURL }} + - --web.external-url={{ .Values.alertmanager.baseURL }} + {{- end }} + + ports: + - containerPort: 9093 + readinessProbe: + httpGet: + path: {{ .Values.alertmanager.prefixURL }}/-/ready + port: 9093 + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: +{{ toYaml .Values.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: "{{ .Values.alertmanager.persistentVolume.mountPath }}" + subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}" + {{- range 
.Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + + {{- if .Values.configmapReload.alertmanager.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.alertmanager.name }} + image: "{{ include "get.cmreloadimage" .}}" + imagePullPolicy: "{{ .Values.configmapReload.alertmanager.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://127.0.0.1:9093{{ .Values.alertmanager.prefixURL }}/-/reload + resources: +{{ toYaml .Values.configmapReload.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }} + {{- end }} + {{- with .Values.alertmanager.dnsConfig }} + dnsConfig: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.securityContext }} + securityContext: +{{ toYaml .Values.alertmanager.securityContext | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.tolerations }} + tolerations: +{{ toYaml .Values.alertmanager.tolerations | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.affinity }} + affinity: +{{ toYaml .Values.alertmanager.affinity | indent 8 }} + {{- end }} + volumes: + - name: config-volume + {{- if empty .Values.alertmanager.configFromSecret }} + configMap: + name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template "prometheus.alertmanager.fullname" . 
}}{{- end }} + {{- else }} + secret: + secretName: {{ .Values.alertmanager.configFromSecret }} + {{- end }} + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} + - name: storage-volume + {{- if .Values.alertmanager.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.alertmanager.persistentVolume.existingClaim }}{{ .Values.alertmanager.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.alertmanager.fullname" . }}{{- end }} + {{- else }} + emptyDir: + {{- if .Values.alertmanager.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.alertmanager.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} + {{- end -}} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/headless-svc.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/headless-svc.yaml new file mode 100644 index 000000000..8c402c408 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/headless-svc.yaml @@ -0,0 +1,31 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.statefulSet.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.alertmanager.statefulSet.headless.annotations }} + annotations: +{{ toYaml .Values.alertmanager.statefulSet.headless.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +{{- if .Values.alertmanager.statefulSet.headless.labels }} +{{ toYaml .Values.alertmanager.statefulSet.headless.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.alertmanager.fullname" . }}-headless +{{ include "prometheus.namespace" . 
| indent 2 }} +spec: + clusterIP: None + ports: + - name: http + port: {{ .Values.alertmanager.statefulSet.headless.servicePort }} + protocol: TCP + targetPort: 9093 +{{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} + - name: meshpeer + port: 6783 + protocol: TCP + targetPort: 6783 +{{- end }} + selector: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 4 }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/ingress.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/ingress.yaml new file mode 100644 index 000000000..6e856360b --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/ingress.yaml @@ -0,0 +1,57 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "ingress.supportsPathType" .) "true" -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.alertmanager.fullname" . }} +{{- $servicePort := .Values.alertmanager.service.servicePort -}} +{{- $ingressPath := .Values.alertmanager.ingress.path -}} +{{- $ingressPathType := .Values.alertmanager.ingress.pathType -}} +{{- $extraPaths := .Values.alertmanager.ingress.extraPaths -}} +apiVersion: {{ template "ingress.apiVersion" . }} +kind: Ingress +metadata: +{{- if .Values.alertmanager.ingress.annotations }} + annotations: +{{ toYaml .Values.alertmanager.ingress.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +{{- range $key, $value := .Values.alertmanager.ingress.extraLabels }} + {{ $key }}: {{ $value }} +{{- end }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.alertmanager.ingress.ingressClassName }} + ingressClassName: {{ .Values.alertmanager.ingress.ingressClassName }} + {{- end }} + rules: + {{- range .Values.alertmanager.ingress.hosts }} + {{- $url := splitList "/" . }} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end -}} +{{- if .Values.alertmanager.ingress.tls }} + tls: +{{ toYaml .Values.alertmanager.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/netpol.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/netpol.yaml new file mode 100644 index 000000000..e44ade60e --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/netpol.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.alertmanager.enabled .Values.networkPolicy.enabled -}} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} + ingress: + - from: + - podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . 
| nindent 12 }} + - ports: + - port: 9093 +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/pdb.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/pdb.yaml new file mode 100644 index 000000000..41a92f364 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/pdb.yaml @@ -0,0 +1,14 @@ +{{- if .Values.alertmanager.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.alertmanager.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + {{- include "prometheus.alertmanager.labels" . | nindent 6 }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/psp.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/psp.yaml new file mode 100644 index 000000000..64fb13003 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/psp.yaml @@ -0,0 +1,46 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.alertmanager.fullname" . }} + labels: + {{- include "prometheus.alertmanager.labels" . 
| nindent 4 }} + annotations: +{{- if .Values.alertmanager.podSecurityPolicy.annotations }} +{{ toYaml .Values.alertmanager.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'secret' + allowedHostPaths: + - pathPrefix: /etc + readOnly: true + - pathPrefix: {{ .Values.alertmanager.persistentVolume.mountPath }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: true +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/pvc.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/pvc.yaml new file mode 100644 index 000000000..28774d0e0 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/pvc.yaml @@ -0,0 +1,39 @@ +{{- if not .Values.alertmanager.statefulSet.enabled -}} +{{- if and .Values.alertmanager.enabled .Values.alertmanager.persistentVolume.enabled -}} +{{- if not .Values.alertmanager.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.alertmanager.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} +spec: + accessModes: +{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 4 }} +{{- if .Values.alertmanager.persistentVolume.storageClass }} + {{- if (eq "-" .Values.alertmanager.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}" + {{- end }} +{{- else if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.global.persistence.storageClass }}" + {{- end }} +{{- end }} +{{- if .Values.alertmanager.persistentVolume.volumeBindingMode }} + volumeBindingModeName: "{{ .Values.alertmanager.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.alertmanager.persistentVolume.size }}" +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/role.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/role.yaml new file mode 100644 index 000000000..ce60eaf0a --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/role.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create (eq .Values.alertmanager.useClusterRole false) (not .Values.alertmanager.useExistingRole) -}} +{{- range $.Values.alertmanager.namespaces }} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: Role +metadata: + labels: + {{- include "prometheus.alertmanager.labels" $ | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" $ }} + namespace: {{ . 
}} +rules: +{{- if $.Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.alertmanager.fullname" $ }} +{{- else }} + [] +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/rolebinding.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/rolebinding.yaml new file mode 100644 index 000000000..906d6522d --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/rolebinding.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.alertmanager.enabled .Values.rbac.create (eq .Values.alertmanager.useClusterRole false) -}} +{{ range $.Values.alertmanager.namespaces }} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: RoleBinding +metadata: + labels: + {{- include "prometheus.alertmanager.labels" $ | nindent 4 }} + name: {{ template "prometheus.alertmanager.fullname" $ }} + namespace: {{ . 
}} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.alertmanager" $ }} +{{ include "prometheus.namespace" $ | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role +{{- if (not $.Values.alertmanager.useExistingRole) }} + name: {{ template "prometheus.alertmanager.fullname" $ }} +{{- else }} + name: {{ $.Values.alertmanager.useExistingRole }} +{{- end }} +{{- end }} +{{ end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/service.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/service.yaml new file mode 100644 index 000000000..9edc9ac65 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/service.yaml @@ -0,0 +1,53 @@ +{{- if .Values.alertmanager.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.alertmanager.service.annotations }} + annotations: +{{ toYaml .Values.alertmanager.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} +{{- if .Values.alertmanager.service.labels }} +{{ toYaml .Values.alertmanager.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} +spec: +{{- if .Values.alertmanager.service.clusterIP }} + clusterIP: {{ .Values.alertmanager.service.clusterIP }} +{{- end }} +{{- if .Values.alertmanager.service.externalIPs }} + externalIPs: +{{ toYaml .Values.alertmanager.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.alertmanager.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.alertmanager.service.loadBalancerIP }} +{{- end }} +{{- if .Values.alertmanager.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.alertmanager.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.alertmanager.service.servicePort }} + protocol: TCP + targetPort: 9093 + {{- if .Values.alertmanager.service.nodePort }} + nodePort: {{ .Values.alertmanager.service.nodePort }} + {{- end }} +{{- if .Values.alertmanager.service.enableMeshPeer }} + - name: meshpeer + port: 6783 + protocol: TCP + targetPort: 6783 +{{- end }} + selector: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 4 }} +{{- if .Values.alertmanager.service.sessionAffinity }} + sessionAffinity: {{ .Values.alertmanager.service.sessionAffinity }} +{{- end }} + type: "{{ .Values.alertmanager.service.type }}" +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/serviceaccount.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/serviceaccount.yaml new file mode 100644 index 000000000..a5d996a85 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.alertmanager.enabled .Values.serviceAccounts.alertmanager.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.alertmanager" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} + annotations: +{{ toYaml .Values.serviceAccounts.alertmanager.annotations | indent 4 }} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/sts.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/sts.yaml new file mode 100644 index 000000000..95bbfe6c8 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/alertmanager/sts.yaml @@ -0,0 +1,187 @@ +{{- if and .Values.alertmanager.enabled .Values.alertmanager.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: +{{- if .Values.alertmanager.statefulSet.annotations }} + annotations: + {{ toYaml .Values.alertmanager.statefulSet.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . | nindent 4 }} + {{- if .Values.alertmanager.statefulSet.labels}} + {{ toYaml .Values.alertmanager.statefulSet.labels | nindent 4 }} + {{- end}} + name: {{ template "prometheus.alertmanager.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + serviceName: {{ template "prometheus.alertmanager.fullname" . }}-headless + selector: + matchLabels: + {{- include "prometheus.alertmanager.matchLabels" . | nindent 6 }} + replicas: {{ .Values.alertmanager.replicaCount }} + podManagementPolicy: {{ .Values.alertmanager.statefulSet.podManagementPolicy }} + template: + metadata: + {{- if .Values.alertmanager.podAnnotations }} + annotations: + {{ toYaml .Values.alertmanager.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.alertmanager.labels" . 
| nindent 8 }} + {{- if .Values.alertmanager.podLabels}} + {{ toYaml .Values.alertmanager.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.alertmanager.affinity }} + affinity: +{{ toYaml .Values.alertmanager.affinity | indent 8 }} +{{- end }} +{{- if .Values.alertmanager.schedulerName }} + schedulerName: "{{ .Values.alertmanager.schedulerName }}" +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.alertmanager" . }} +{{- if .Values.alertmanager.priorityClassName }} + priorityClassName: "{{ .Values.alertmanager.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }} + image: "{{ .Values.alertmanager.image.repository }}:{{ .Values.alertmanager.image.tag }}" + imagePullPolicy: "{{ .Values.alertmanager.image.pullPolicy }}" + env: + {{- range $key, $value := .Values.alertmanager.extraEnv }} + - name: {{ $key }} + value: {{ $value }} + {{- end }} + - name: POD_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + args: + - --config.file=/etc/config/alertmanager.yml + - --storage.path={{ .Values.alertmanager.persistentVolume.mountPath }} + {{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} + - --cluster.advertise-address=[$(POD_IP)]:6783 + - --cluster.listen-address=0.0.0.0:6783 + {{- range $n := until (.Values.alertmanager.replicaCount | int) }} + - --cluster.peer={{ template "prometheus.alertmanager.fullname" $ }}-{{ $n }}.{{ template "prometheus.alertmanager.fullname" $ }}-headless:6783 + {{- end }} + {{- else }} + - --cluster.listen-address= + {{- end }} + {{- range $key, $value := .Values.alertmanager.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.alertmanager.baseURL }} + - --web.external-url={{ .Values.alertmanager.baseURL }} + {{- end }} + + ports: + - containerPort: 9093 + {{- if .Values.alertmanager.statefulSet.headless.enableMeshPeer }} + - containerPort: 6783 + {{- end }} + readinessProbe: + 
httpGet: + path: {{ .Values.alertmanager.prefixURL }}/#/status + port: 9093 + initialDelaySeconds: 30 + timeoutSeconds: 30 + resources: +{{ toYaml .Values.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: "{{ .Values.alertmanager.persistentVolume.mountPath }}" + subPath: "{{ .Values.alertmanager.persistentVolume.subPath }}" + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.configmapReload.alertmanager.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.alertmanager.name }}-{{ .Values.configmapReload.alertmanager.name }} + image: "{{ include "get.cmreloadimage" .}}" + imagePullPolicy: "{{ .Values.configmapReload.alertmanager.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://localhost:9093{{ .Values.alertmanager.prefixURL }}/-/reload + resources: +{{ toYaml .Values.configmapReload.alertmanager.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.nodeSelector }} + nodeSelector: +{{ toYaml .Values.alertmanager.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.securityContext }} + securityContext: +{{ toYaml .Values.alertmanager.securityContext | indent 8 }} + {{- end }} + {{- if .Values.alertmanager.tolerations }} + tolerations: +{{ toYaml .Values.alertmanager.tolerations | indent 8 }} + {{- end }} + volumes: + - name: config-volume + {{- if empty .Values.alertmanager.configFromSecret }} + configMap: + name: {{ if .Values.alertmanager.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.alertmanager.configMapOverrideName }}{{- else }}{{ template 
"prometheus.alertmanager.fullname" . }}{{- end }} + {{- else }} + secret: + secretName: {{ .Values.alertmanager.configFromSecret }} + {{- end }} + {{- range .Values.alertmanager.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} +{{- if .Values.alertmanager.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage-volume + {{- if .Values.alertmanager.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.alertmanager.persistentVolume.annotations | indent 10 }} + {{- end }} + spec: + accessModes: +{{ toYaml .Values.alertmanager.persistentVolume.accessModes | indent 10 }} + resources: + requests: + storage: "{{ .Values.alertmanager.persistentVolume.size }}" + {{- if .Values.alertmanager.persistentVolume.storageClass }} + {{- if (eq "-" .Values.alertmanager.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.alertmanager.persistentVolume.storageClass }}" + {{- end }} + {{- else if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.global.persistence.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: storage-volume + emptyDir: + {{- if .Values.alertmanager.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.alertmanager.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/daemonset.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/daemonset.yaml new file mode 100644 index 000000000..667be9f49 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/daemonset.yaml @@ -0,0 +1,146 @@ +{{- if .Values.nodeExporter.enabled -}} +apiVersion: {{ template "prometheus.daemonset.apiVersion" . 
}} +kind: DaemonSet +metadata: +{{- if .Values.nodeExporter.deploymentAnnotations }} + annotations: +{{ toYaml .Values.nodeExporter.deploymentAnnotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} + name: {{ template "prometheus.nodeExporter.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + selector: + matchLabels: + {{- include "prometheus.nodeExporter.matchLabels" . | nindent 6 }} + {{- if .Values.nodeExporter.updateStrategy }} + updateStrategy: +{{ toYaml .Values.nodeExporter.updateStrategy | indent 4 }} + {{- end }} + template: + metadata: + {{- if .Values.nodeExporter.podAnnotations }} + annotations: +{{ toYaml .Values.nodeExporter.podAnnotations | indent 8 }} + {{- end }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 8 }} +{{- if .Values.nodeExporter.pod.labels }} +{{ toYaml .Values.nodeExporter.pod.labels | indent 8 }} +{{- end }} + spec: + serviceAccountName: {{ template "prometheus.serviceAccountName.nodeExporter" . }} + {{- if .Values.nodeExporter.extraInitContainers }} + initContainers: +{{ toYaml .Values.nodeExporter.extraInitContainers | indent 8 }} + {{- end }} +{{- if .Values.nodeExporter.priorityClassName }} + priorityClassName: "{{ .Values.nodeExporter.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . 
}}-{{ .Values.nodeExporter.name }} + image: "{{ .Values.nodeExporter.image.repository }}:{{ .Values.nodeExporter.image.tag }}" + imagePullPolicy: "{{ .Values.nodeExporter.image.pullPolicy }}" + args: + - --path.procfs=/host/proc + - --path.sysfs=/host/sys + {{- if .Values.nodeExporter.hostRootfs }} + - --path.rootfs=/host/root + {{- end }} + {{- if .Values.nodeExporter.hostNetwork }} + - --web.listen-address=:{{ .Values.nodeExporter.service.hostPort }} + {{- end }} + {{- range $key, $value := .Values.nodeExporter.extraArgs }} + {{- if $value }} + - --{{ $key }}={{ $value }} + {{- else }} + - --{{ $key }} + {{- end }} + {{- end }} + ports: + - name: metrics + {{- if .Values.nodeExporter.hostNetwork }} + containerPort: {{ .Values.nodeExporter.service.hostPort }} + {{- else }} + containerPort: 9100 + {{- end }} + hostPort: {{ .Values.nodeExporter.service.hostPort }} + resources: +{{ toYaml .Values.nodeExporter.resources | indent 12 }} + volumeMounts: + - name: proc + mountPath: /host/proc + readOnly: true + - name: sys + mountPath: /host/sys + readOnly: true + {{- if .Values.nodeExporter.hostRootfs }} + - name: root + mountPath: /host/root + mountPropagation: HostToContainer + readOnly: true + {{- end }} + {{- range .Values.nodeExporter.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- if .mountPropagation }} + mountPropagation: {{ .mountPropagation }} + {{- end }} + {{- end }} + {{- range .Values.nodeExporter.extraConfigmapMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.nodeExporter.hostNetwork }} + hostNetwork: true + {{- end }} + {{- if .Values.nodeExporter.hostPID }} + hostPID: true + {{- end }} + {{- if .Values.nodeExporter.tolerations }} + tolerations: +{{ toYaml .Values.nodeExporter.tolerations | indent 8 }} + {{- 
end }} + {{- if .Values.nodeExporter.nodeSelector }} + nodeSelector: +{{ toYaml .Values.nodeExporter.nodeSelector | indent 8 }} + {{- end }} + {{- with .Values.nodeExporter.dnsConfig }} + dnsConfig: +{{ toYaml . | indent 8 }} + {{- end }} + {{- if .Values.nodeExporter.securityContext }} + securityContext: +{{ toYaml .Values.nodeExporter.securityContext | indent 8 }} + {{- end }} + volumes: + - name: proc + hostPath: + path: /proc + - name: sys + hostPath: + path: /sys + {{- if .Values.nodeExporter.hostRootfs }} + - name: root + hostPath: + path: / + {{- end }} + {{- range .Values.nodeExporter.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.nodeExporter.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/psp.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/psp.yaml new file mode 100644 index 000000000..bd9c73bee --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/psp.yaml @@ -0,0 +1,55 @@ +{{- if and .Values.nodeExporter.enabled .Values.rbac.create .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.nodeExporter.fullname" . }} + labels: + {{- include "prometheus.nodeExporter.labels" . 
| nindent 4 }} + annotations: +{{- if .Values.nodeExporter.podSecurityPolicy.annotations }} +{{ toYaml .Values.nodeExporter.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'configMap' + - 'hostPath' + - 'secret' + allowedHostPaths: + - pathPrefix: /proc + readOnly: true + - pathPrefix: /sys + readOnly: true + - pathPrefix: / + readOnly: true + {{- range .Values.nodeExporter.extraHostPathMounts }} + - pathPrefix: {{ .hostPath }} + readOnly: {{ .readOnly }} + {{- end }} + hostNetwork: {{ .Values.nodeExporter.hostNetwork }} + hostPID: {{ .Values.nodeExporter.hostPID }} + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false + hostPorts: + - min: 1 + max: 65535 +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/role.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/role.yaml new file mode 100644 index 000000000..d8ef3ed90 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/role.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.nodeExporter.enabled .Values.rbac.create }} +{{- if or (default .Values.nodeExporter.podSecurityPolicy.enabled false) (.Values.podSecurityPolicy.enabled) }} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ template "prometheus.nodeExporter.fullname" . }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} +{{ include "prometheus.namespace" . | indent 2 }} +rules: +- apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: + - {{ template "prometheus.nodeExporter.fullname" . 
}} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/rolebinding.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/rolebinding.yaml new file mode 100644 index 000000000..06914b70a --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.nodeExporter.enabled .Values.rbac.create }} +{{- if .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ template "prometheus.nodeExporter.fullname" . }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} +{{ include "prometheus.namespace" . | indent 2 }} +roleRef: + kind: Role + name: {{ template "prometheus.nodeExporter.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: +- kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.nodeExporter" . }} +{{ include "prometheus.namespace" . | indent 2 }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/serviceaccount.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/serviceaccount.yaml new file mode 100644 index 000000000..0cf91afba --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.nodeExporter.enabled .Values.serviceAccounts.nodeExporter.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.nodeExporter" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} + annotations: +{{ toYaml .Values.serviceAccounts.nodeExporter.annotations | indent 4 }} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/svc.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/svc.yaml new file mode 100644 index 000000000..26d1eaa21 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/node-exporter/svc.yaml @@ -0,0 +1,47 @@ +{{- if .Values.nodeExporter.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.nodeExporter.service.annotations }} + annotations: +{{ toYaml .Values.nodeExporter.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.nodeExporter.labels" . | nindent 4 }} +{{- if .Values.nodeExporter.service.labels }} +{{ toYaml .Values.nodeExporter.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.nodeExporter.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: +{{- if .Values.nodeExporter.service.clusterIP }} + clusterIP: {{ .Values.nodeExporter.service.clusterIP }} +{{- end }} +{{- if .Values.nodeExporter.service.externalIPs }} + externalIPs: +{{ toYaml .Values.nodeExporter.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.nodeExporter.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.nodeExporter.service.loadBalancerIP }} +{{- end }} +{{- if .Values.nodeExporter.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.nodeExporter.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: metrics + {{- if .Values.nodeExporter.hostNetwork }} + port: {{ .Values.nodeExporter.service.hostPort }} + protocol: TCP + targetPort: {{ .Values.nodeExporter.service.hostPort }} + {{- else }} + port: {{ .Values.nodeExporter.service.servicePort }} + protocol: TCP + targetPort: 9100 + {{- end }} + selector: + {{- include "prometheus.nodeExporter.matchLabels" . 
| nindent 4 }} + type: "{{ .Values.nodeExporter.service.type }}" +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/clusterrole.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/clusterrole.yaml new file mode 100644 index 000000000..76ecf053f --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/clusterrole.yaml @@ -0,0 +1,21 @@ +{{- if and .Values.pushgateway.enabled .Values.rbac.create -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +rules: +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.pushgateway.fullname" . }} +{{- else }} + [] +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/clusterrolebinding.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/clusterrolebinding.yaml new file mode 100644 index 000000000..15770ee50 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.pushgateway.enabled .Values.rbac.create -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.pushgateway" . }} +{{ include "prometheus.namespace" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "prometheus.pushgateway.fullname" . 
}} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/deploy.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/deploy.yaml new file mode 100644 index 000000000..ffdbfcc42 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/deploy.yaml @@ -0,0 +1,119 @@ +{{- if .Values.pushgateway.enabled -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . }} +kind: Deployment +metadata: +{{- if .Values.pushgateway.deploymentAnnotations }} + annotations: + {{ toYaml .Values.pushgateway.deploymentAnnotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + selector: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} + matchLabels: + {{- include "prometheus.pushgateway.matchLabels" . | nindent 6 }} + replicas: {{ .Values.pushgateway.replicaCount }} + {{- if .Values.pushgateway.strategy }} + strategy: +{{ toYaml .Values.pushgateway.strategy | trim | indent 4 }} + {{ if eq .Values.pushgateway.strategy.type "Recreate" }}rollingUpdate: null{{ end }} +{{- end }} + template: + metadata: + {{- if .Values.pushgateway.podAnnotations }} + annotations: + {{ toYaml .Values.pushgateway.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 8 }} + {{- if .Values.pushgateway.podLabels }} + {{ toYaml .Values.pushgateway.podLabels | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "prometheus.serviceAccountName.pushgateway" . 
}} + {{- if .Values.pushgateway.extraInitContainers }} + initContainers: +{{ toYaml .Values.pushgateway.extraInitContainers | indent 8 }} + {{- end }} +{{- if .Values.pushgateway.priorityClassName }} + priorityClassName: "{{ .Values.pushgateway.priorityClassName }}" +{{- end }} + containers: + - name: {{ template "prometheus.name" . }}-{{ .Values.pushgateway.name }} + image: "{{ .Values.pushgateway.image.repository }}:{{ .Values.pushgateway.image.tag }}" + imagePullPolicy: "{{ .Values.pushgateway.image.pullPolicy }}" + args: + {{- range $key, $value := .Values.pushgateway.extraArgs }} + {{- $stringvalue := toString $value }} + {{- if eq $stringvalue "true" }} + - --{{ $key }} + {{- else }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- end }} + ports: + - containerPort: 9091 + livenessProbe: + httpGet: + {{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }} + path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/-/healthy + {{- else }} + path: /-/healthy + {{- end }} + port: 9091 + initialDelaySeconds: 10 + timeoutSeconds: 10 + readinessProbe: + httpGet: + {{- if (index .Values "pushgateway" "extraArgs" "web.route-prefix") }} + path: /{{ index .Values "pushgateway" "extraArgs" "web.route-prefix" }}/-/ready + {{- else }} + path: /-/ready + {{- end }} + port: 9091 + initialDelaySeconds: 10 + timeoutSeconds: 10 + resources: +{{ toYaml .Values.pushgateway.resources | indent 12 }} + {{- if .Values.pushgateway.persistentVolume.enabled }} + volumeMounts: + - name: storage-volume + mountPath: "{{ .Values.pushgateway.persistentVolume.mountPath }}" + subPath: "{{ .Values.pushgateway.persistentVolume.subPath }}" + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.nodeSelector }} + nodeSelector: +{{ toYaml .Values.pushgateway.nodeSelector | indent 8 }} + {{- end }} + {{- with .Values.pushgateway.dnsConfig }} + dnsConfig: +{{ toYaml 
. | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.securityContext }} + securityContext: +{{ toYaml .Values.pushgateway.securityContext | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.tolerations }} + tolerations: +{{ toYaml .Values.pushgateway.tolerations | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.affinity }} + affinity: +{{ toYaml .Values.pushgateway.affinity | indent 8 }} + {{- end }} + {{- if .Values.pushgateway.persistentVolume.enabled }} + volumes: + - name: storage-volume + persistentVolumeClaim: + claimName: {{ if .Values.pushgateway.persistentVolume.existingClaim }}{{ .Values.pushgateway.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.pushgateway.fullname" . }}{{- end }} + {{- end -}} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/ingress.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/ingress.yaml new file mode 100644 index 000000000..5f176aed4 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/ingress.yaml @@ -0,0 +1,54 @@ +{{- if and .Values.pushgateway.enabled .Values.pushgateway.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "ingress.supportsPathType" .) "true" -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.pushgateway.fullname" . }} +{{- $servicePort := .Values.pushgateway.service.servicePort -}} +{{- $ingressPath := .Values.pushgateway.ingress.path -}} +{{- $ingressPathType := .Values.pushgateway.ingress.pathType -}} +{{- $extraPaths := .Values.pushgateway.ingress.extraPaths -}} +apiVersion: {{ template "ingress.apiVersion" . 
}} +kind: Ingress +metadata: +{{- if .Values.pushgateway.ingress.annotations }} + annotations: +{{ toYaml .Values.pushgateway.ingress.annotations | indent 4}} +{{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.pushgateway.ingress.ingressClassName }} + ingressClassName: {{ .Values.pushgateway.ingress.ingressClassName }} + {{- end }} + rules: + {{- range .Values.pushgateway.ingress.hosts }} + {{- $url := splitList "/" . }} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end -}} +{{- if .Values.pushgateway.ingress.tls }} + tls: +{{ toYaml .Values.pushgateway.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/netpol.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/netpol.yaml new file mode 100644 index 000000000..c8d1fb37e --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/netpol.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.pushgateway.enabled .Values.networkPolicy.enabled -}} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.pushgateway.matchLabels" . 
| nindent 6 }} + ingress: + - from: + - podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 12 }} + - ports: + - port: 9091 +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/pdb.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/pdb.yaml new file mode 100644 index 000000000..50beb486d --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/pdb.yaml @@ -0,0 +1,14 @@ +{{- if .Values.pushgateway.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.pushgateway.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + {{- include "prometheus.pushgateway.labels" . | nindent 6 }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/psp.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/psp.yaml new file mode 100644 index 000000000..1ca3267f8 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/psp.yaml @@ -0,0 +1,42 @@ +{{- if and .Values.pushgateway.enabled .Values.rbac.create .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.pushgateway.fullname" . }} + labels: + {{- include "prometheus.pushgateway.labels" . 
| nindent 4 }} + annotations: +{{- if .Values.pushgateway.podSecurityPolicy.annotations }} +{{ toYaml .Values.pushgateway.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + - ALL + volumes: + - 'persistentVolumeClaim' + - 'secret' + allowedHostPaths: + - pathPrefix: {{ .Values.pushgateway.persistentVolume.mountPath }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: true +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/pvc.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/pvc.yaml new file mode 100644 index 000000000..908f4e2f2 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/pvc.yaml @@ -0,0 +1,37 @@ +{{- if .Values.pushgateway.persistentVolume.enabled -}} +{{- if not .Values.pushgateway.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.pushgateway.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.pushgateway.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} +spec: + accessModes: +{{ toYaml .Values.pushgateway.persistentVolume.accessModes | indent 4 }} +{{- if .Values.pushgateway.persistentVolume.storageClass }} + {{- if (eq "-" .Values.pushgateway.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.pushgateway.persistentVolume.storageClass }}" + {{- end }} +{{- else if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.global.persistence.storageClass }}" + {{- end }} +{{- end }} +{{- if .Values.pushgateway.persistentVolume.volumeBindingMode }} + volumeBindingModeName: "{{ .Values.pushgateway.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.pushgateway.persistentVolume.size }}" +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/service.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/service.yaml new file mode 100644 index 000000000..f05f17c42 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/service.yaml @@ -0,0 +1,41 @@ +{{- if .Values.pushgateway.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.pushgateway.service.annotations }} + annotations: +{{ toYaml .Values.pushgateway.service.annotations | indent 4}} +{{- end }} + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} +{{- if .Values.pushgateway.service.labels }} +{{ toYaml .Values.pushgateway.service.labels | indent 4}} +{{- end }} + name: {{ template "prometheus.pushgateway.fullname" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} +spec: +{{- if .Values.pushgateway.service.clusterIP }} + clusterIP: {{ .Values.pushgateway.service.clusterIP }} +{{- end }} +{{- if .Values.pushgateway.service.externalIPs }} + externalIPs: +{{ toYaml .Values.pushgateway.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.pushgateway.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.pushgateway.service.loadBalancerIP }} +{{- end }} +{{- if .Values.pushgateway.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.pushgateway.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.pushgateway.service.servicePort }} + protocol: TCP + targetPort: 9091 + selector: + {{- include "prometheus.pushgateway.matchLabels" . | nindent 4 }} + type: "{{ .Values.pushgateway.service.type }}" +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/serviceaccount.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/serviceaccount.yaml new file mode 100644 index 000000000..8c0b876f3 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/pushgateway/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and .Values.pushgateway.enabled .Values.serviceAccounts.pushgateway.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.pushgateway.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.pushgateway" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} + annotations: +{{ toYaml .Values.serviceAccounts.pushgateway.annotations | indent 4 }} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/clusterrole.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/clusterrole.yaml new file mode 100644 index 000000000..539c56304 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/clusterrole.yaml @@ -0,0 +1,48 @@ +{{- if and .Values.server.enabled .Values.rbac.create (empty .Values.server.useExistingClusterRoleName) -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.clusterrolefullname" . }} +rules: +{{- if .Values.podSecurityPolicy.enabled }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.server.fullname" . }} +{{- end }} + - apiGroups: + - "" + resources: + - nodes + - nodes/proxy + - nodes/metrics + - services + - endpoints + - pods + - ingresses + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "extensions" + - "networking.k8s.io" + resources: + - ingresses/status + - ingresses + verbs: + - get + - list + - watch + - nonResourceURLs: + - "/metrics" + verbs: + - get +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/clusterrolebinding.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/clusterrolebinding.yaml new file mode 100644 index 000000000..3c42e5827 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.server.enabled .Values.rbac.create (empty .Values.server.namespaces) (empty .Values.server.useExistingClusterRoleName) -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.server.labels" . 
| nindent 4 }} + name: {{ template "prometheus.server.clusterrolefullname" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.server" . }} +{{ include "prometheus.namespace" . | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ template "prometheus.server.clusterrolefullname" . }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/cm.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/cm.yaml new file mode 100644 index 000000000..e012694fc --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/cm.yaml @@ -0,0 +1,82 @@ +{{- if .Values.server.enabled -}} +{{- if (empty .Values.server.configMapOverrideName) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +data: +{{- $root := . 
-}} +{{- range $key, $value := .Values.serverFiles }} + {{ $key }}: | +{{- if eq $key "prometheus.yml" }} + global: +{{ $root.Values.server.global | toYaml | trimSuffix "\n" | indent 6 }} +{{- if $root.Values.server.remoteWrite }} + remote_write: +{{ $root.Values.server.remoteWrite | toYaml | indent 4 }} +{{- end }} +{{- if $root.Values.server.remoteRead }} + remote_read: +{{ $root.Values.server.remoteRead | toYaml | indent 4 }} +{{- end }} +{{- end }} +{{- if eq $key "alerts" }} +{{- if and (not (empty $value)) (empty $value.groups) }} + groups: +{{- range $ruleKey, $ruleValue := $value }} + - name: {{ $ruleKey -}}.rules + rules: +{{ $ruleValue | toYaml | trimSuffix "\n" | indent 6 }} +{{- end }} +{{- else }} +{{ toYaml $value | indent 4 }} +{{- end }} +{{- else }} +{{ toYaml $value | default "{}" | indent 4 }} +{{- end }} +{{- if eq $key "prometheus.yml" -}} +{{- if $root.Values.extraScrapeConfigs }} +{{ tpl $root.Values.extraScrapeConfigs $root | indent 4 }} +{{- end -}} +{{- if or ($root.Values.alertmanager.enabled) ($root.Values.server.alertmanagers) }} + alerting: +{{- if $root.Values.alertRelabelConfigs }} +{{ $root.Values.alertRelabelConfigs | toYaml | trimSuffix "\n" | indent 6 }} +{{- end }} + alertmanagers: +{{- if $root.Values.server.alertmanagers }} +{{ toYaml $root.Values.server.alertmanagers | indent 8 }} +{{- else }} + - kubernetes_sd_configs: + - role: pod + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if $root.Values.alertmanager.prefixURL }} + path_prefix: {{ $root.Values.alertmanager.prefixURL }} + {{- end }} + relabel_configs: + - source_labels: [__meta_kubernetes_namespace] + regex: {{ $root.Release.Namespace }} + action: keep + - source_labels: [__meta_kubernetes_pod_label_app] + regex: {{ template "prometheus.name" $root }} + action: keep + - source_labels: [__meta_kubernetes_pod_label_component] + regex: alertmanager + action: keep 
+ - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_probe] + regex: {{ index $root.Values.alertmanager.podAnnotations "prometheus.io/probe" | default ".*" }} + action: keep + - source_labels: [__meta_kubernetes_pod_container_port_number] + regex: "9093" + action: keep +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/deploy.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/deploy.yaml new file mode 100644 index 000000000..4b9e11909 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/deploy.yaml @@ -0,0 +1,261 @@ +{{- if .Values.server.enabled -}} +{{- if not .Values.server.statefulSet.enabled -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . }} +kind: Deployment +metadata: +{{- if .Values.server.deploymentAnnotations }} + annotations: + {{ toYaml .Values.server.deploymentAnnotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + replicas: {{ .Values.server.replicaCount }} + {{- if .Values.server.strategy }} + strategy: +{{ toYaml .Values.server.strategy | trim | indent 4 }} + {{ if eq .Values.server.strategy.type "Recreate" }}rollingUpdate: null{{ end }} +{{- end }} + template: + metadata: + {{- if .Values.server.podAnnotations }} + annotations: + {{ toYaml .Values.server.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . 
| nindent 8 }} + {{- if .Values.server.podLabels}} + {{ toYaml .Values.server.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.server.priorityClassName }} + priorityClassName: "{{ .Values.server.priorityClassName }}" +{{- end }} +{{- if .Values.server.schedulerName }} + schedulerName: "{{ .Values.server.schedulerName }}" +{{- end }} +{{- if semverCompare ">=1.13-0" .Capabilities.KubeVersion.GitVersion }} + {{- if or (.Values.server.enableServiceLinks) (eq (.Values.server.enableServiceLinks | toString) "") }} + enableServiceLinks: true + {{- else }} + enableServiceLinks: false + {{- end }} +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} + {{- if .Values.server.extraInitContainers }} + initContainers: +{{ toYaml .Values.server.extraInitContainers | indent 8 }} + {{- end }} + containers: + {{- if .Values.configmapReload.prometheus.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }} + image: "{{ include "get.cmreloadimage" .}}" + imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload + {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraVolumeDirs }} + - --volume-dir={{ . }} + {{- end }} + resources: +{{ toYaml .Values.configmapReload.prometheus.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- end }} + + - name: {{ template "prometheus.name" . 
}}-{{ .Values.server.name }} + image: "{{ include "get.serverimage" .}}" + imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" + {{- if .Values.server.env }} + env: +{{ toYaml .Values.server.env | indent 12}} + {{- end }} + args: + {{- if .Values.server.prefixURL }} + - --web.route-prefix={{ .Values.server.prefixURL }} + {{- end }} + {{- if .Values.server.retention }} + - --storage.tsdb.retention.time={{ .Values.server.retention }} + {{- end }} + - --config.file={{ .Values.server.configPath }} + {{- if .Values.server.storagePath }} + - --storage.tsdb.path={{ .Values.server.storagePath }} + {{- else }} + - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} + {{- end }} + - --web.console.libraries=/etc/prometheus/console_libraries + - --web.console.templates=/etc/prometheus/consoles + {{- range .Values.server.extraFlags }} + - --{{ . }} + {{- end }} + {{- range $key, $value := .Values.server.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.server.baseURL }} + - --web.external-url={{ .Values.server.baseURL }} + {{- end }} + ports: + - containerPort: 9090 + readinessProbe: + httpGet: + path: {{ .Values.server.prefixURL }}/-/ready + port: 9090 + initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} + periodSeconds: {{ .Values.server.readinessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} + failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }} + successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }} + livenessProbe: + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} + periodSeconds: {{ .Values.server.livenessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} + failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} + successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} + resources: +{{ toYaml 
.Values.server.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: {{ .Values.server.persistentVolume.mountPath }} + subPath: "{{ .Values.server.persistentVolume.subPath }}" + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.server.extraVolumeMounts }} + {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.server.sidecarContainers }} + {{- range $name, $spec := .Values.server.sidecarContainers }} + - name: {{ $name }} + {{- if kindIs "string" $spec }} + {{- tpl $spec $ | nindent 10 }} + {{- else }} + {{- toYaml $spec | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} + hostNetwork: {{ .Values.server.hostNetwork }} + {{- if .Values.server.dnsPolicy }} + dnsPolicy: {{ .Values.server.dnsPolicy }} + {{- end }} + {{- if (or .Values.global.imagePullSecret .Values.imagePullSecrets) }} + imagePullSecrets: + {{- if .Values.global.imagePullSecret }} + - name: {{ .Values.global.imagePullSecret }} + {{- end }} + {{- if .Values.imagePullSecrets }} +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.server.nodeSelector }} + nodeSelector: +{{ toYaml .Values.server.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.server.hostAliases }} + hostAliases: +{{ toYaml .Values.server.hostAliases | indent 8 }} + {{- end }} + {{- if .Values.server.dnsConfig }} + dnsConfig: +{{ toYaml .Values.server.dnsConfig | indent 8 }} + {{- end }} + {{- 
if .Values.server.securityContext }} + securityContext: +{{ toYaml .Values.server.securityContext | indent 8 }} + {{- end }} + {{- if .Values.server.tolerations }} + tolerations: +{{ toYaml .Values.server.tolerations | indent 8 }} + {{- end }} + {{- if .Values.server.affinity }} + affinity: +{{ toYaml .Values.server.affinity | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} + volumes: + - name: config-volume + configMap: + name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} +{{- if .Values.server.extraVolumes }} +{{ toYaml .Values.server.extraVolumes | indent 8}} +{{- end }} + - name: storage-volume + {{- if .Values.server.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.server.persistentVolume.existingClaim }}{{ .Values.server.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.server.fullname" . 
}}{{- end }} + {{- else }} + emptyDir: + {{- if .Values.server.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/headless-svc.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/headless-svc.yaml new file mode 100644 index 000000000..d519f4e0e --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/headless-svc.yaml @@ -0,0 +1,37 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.statefulSet.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.server.statefulSet.headless.annotations }} + annotations: +{{ toYaml .Values.server.statefulSet.headless.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- if .Values.server.statefulSet.headless.labels }} +{{ toYaml .Values.server.statefulSet.headless.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }}-headless +{{ include "prometheus.namespace" . | indent 2 }} +spec: + clusterIP: None + ports: + - name: http + port: {{ .Values.server.statefulSet.headless.servicePort }} + protocol: TCP + targetPort: 9090 + {{- if .Values.server.statefulSet.headless.gRPC.enabled }} + - name: grpc + port: {{ .Values.server.statefulSet.headless.gRPC.servicePort }} + protocol: TCP + targetPort: 10901 + {{- if .Values.server.statefulSet.headless.gRPC.nodePort }} + nodePort: {{ .Values.server.statefulSet.headless.gRPC.nodePort }} + {{- end }} + {{- end }} + + selector: + {{- include "prometheus.server.matchLabels" . 
| nindent 4 }} +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/ingress.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/ingress.yaml new file mode 100644 index 000000000..000f39cab --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/ingress.yaml @@ -0,0 +1,59 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "ingress.supportsPathType" .) "true" -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.server.fullname" . }} +{{- $servicePort := .Values.server.service.servicePort -}} +{{- $ingressPath := .Values.server.ingress.path -}} +{{- $ingressPathType := .Values.server.ingress.pathType -}} +{{- $extraPaths := .Values.server.ingress.extraPaths -}} +apiVersion: {{ template "ingress.apiVersion" . }} +kind: Ingress +metadata: +{{- if .Values.server.ingress.annotations }} + annotations: +{{ toYaml .Values.server.ingress.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- range $key, $value := .Values.server.ingress.extraLabels }} + {{ $key }}: {{ $value }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.server.ingress.ingressClassName }} + ingressClassName: {{ .Values.server.ingress.ingressClassName }} + {{- end }} + rules: + {{- range .Values.server.ingress.hosts }} + {{- $url := splitList "/" . 
}} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end -}} +{{- if .Values.server.ingress.tls }} + tls: +{{ toYaml .Values.server.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/netpol.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/netpol.yaml new file mode 100644 index 000000000..c8870e9ff --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/netpol.yaml @@ -0,0 +1,18 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.networkPolicy.enabled }} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + ingress: + - ports: + - port: 9090 +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/pdb.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/pdb.yaml new file mode 100644 index 000000000..364cb5b49 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/pdb.yaml @@ -0,0 +1,14 @@ +{{- if .Values.server.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +spec: + maxUnavailable: {{ .Values.server.podDisruptionBudget.maxUnavailable }} + selector: + matchLabels: + {{- include "prometheus.server.labels" . | nindent 6 }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/psp.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/psp.yaml new file mode 100644 index 000000000..e2b885f16 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/psp.yaml @@ -0,0 +1,51 @@ +{{- if and .Values.server.enabled .Values.rbac.create .Values.podSecurityPolicy.enabled }} +apiVersion: {{ template "prometheus.podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.server.fullname" . }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + annotations: +{{- if .Values.server.podSecurityPolicy.annotations }} +{{ toYaml .Values.server.podSecurityPolicy.annotations | indent 4 }} +{{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + allowedCapabilities: + - 'CHOWN' + volumes: + - 'configMap' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'secret' + - 'hostPath' + allowedHostPaths: + - pathPrefix: /etc + readOnly: true + - pathPrefix: {{ .Values.server.persistentVolume.mountPath }} + {{- range .Values.server.extraHostPathMounts }} + - pathPrefix: {{ .hostPath }} + readOnly: {{ .readOnly }} + {{- end }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/pvc.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/pvc.yaml new file mode 100644 index 000000000..cef89151b --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/pvc.yaml @@ -0,0 +1,41 @@ +{{- if .Values.server.enabled -}} +{{- if not .Values.server.statefulSet.enabled -}} +{{- if .Values.server.persistentVolume.enabled -}} +{{- if not .Values.server.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.server.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.server.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + accessModes: +{{ toYaml .Values.server.persistentVolume.accessModes | indent 4 }} +{{- if .Values.server.persistentVolume.storageClass }} + {{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" + {{- end }} +{{- else if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.global.persistence.storageClass }}" + {{- end }} +{{- end }} +{{- if .Values.server.persistentVolume.volumeBindingMode }} + volumeBindingMode: "{{ .Values.server.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.server.persistentVolume.size }}" +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/rolebinding.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/rolebinding.yaml new
file mode 100644 index 000000000..93ce3ee13 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/rolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and .Values.server.enabled .Values.rbac.create .Values.server.useExistingClusterRoleName .Values.server.namespaces -}} +{{ range $.Values.server.namespaces -}} +--- +apiVersion: {{ template "rbac.apiVersion" $ }} +kind: RoleBinding +metadata: + labels: + {{- include "prometheus.server.labels" $ | nindent 4 }} + name: {{ template "prometheus.server.fullname" $ }} + namespace: {{ . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.server" $ }} +{{ include "prometheus.namespace" $ | indent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ $.Values.server.useExistingClusterRoleName }} +{{ end -}} +{{ end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/service.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/service.yaml new file mode 100644 index 000000000..68f988927 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/service.yaml @@ -0,0 +1,60 @@ +{{- if .Values.server.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.server.service.annotations }} + annotations: +{{ toYaml .Values.server.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- if .Values.server.service.labels }} +{{ toYaml .Values.server.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . 
| indent 2 }} +spec: +{{- if .Values.server.service.clusterIP }} + clusterIP: {{ .Values.server.service.clusterIP }} +{{- end }} +{{- if .Values.server.service.externalIPs }} + externalIPs: +{{ toYaml .Values.server.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.server.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.server.service.loadBalancerIP }} +{{- end }} +{{- if .Values.server.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.server.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.server.service.servicePort }} + protocol: TCP + targetPort: 9090 + {{- if .Values.server.service.nodePort }} + nodePort: {{ .Values.server.service.nodePort }} + {{- end }} + {{- if .Values.server.service.gRPC.enabled }} + - name: grpc + port: {{ .Values.server.service.gRPC.servicePort }} + protocol: TCP + targetPort: 10901 + {{- if .Values.server.service.gRPC.nodePort }} + nodePort: {{ .Values.server.service.gRPC.nodePort }} + {{- end }} + {{- end }} + selector: + {{- if and .Values.server.statefulSet.enabled .Values.server.service.statefulsetReplica.enabled }} + statefulset.kubernetes.io/pod-name: {{ template "prometheus.server.fullname" . }}-{{ .Values.server.service.statefulsetReplica.replica }} + {{- else -}} + {{- include "prometheus.server.matchLabels" . 
| nindent 4 }} +{{- if .Values.server.service.sessionAffinity }} + sessionAffinity: {{ .Values.server.service.sessionAffinity }} +{{- end }} + {{- end }} + type: "{{ .Values.server.service.type }}" +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/serviceaccount.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/serviceaccount.yaml new file mode 100644 index 000000000..9c0502ab7 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/serviceaccount.yaml @@ -0,0 +1,13 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.serviceAccounts.server.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.server" . }} +{{ include "prometheus.namespace" . | indent 2 }} + annotations: +{{ toYaml .Values.serviceAccounts.server.annotations | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/sts.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/sts.yaml new file mode 100644 index 000000000..b0e1e8bdb --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/sts.yaml @@ -0,0 +1,285 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: +{{- if .Values.server.statefulSet.annotations }} + annotations: + {{ toYaml .Values.server.statefulSet.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + {{- if .Values.server.statefulSet.labels}} + {{ toYaml .Values.server.statefulSet.labels | nindent 4 }} + {{- end}} + name: {{ template "prometheus.server.fullname" . }} +{{ include "prometheus.namespace" . | indent 2 }} +spec: + serviceName: {{ template "prometheus.server.fullname" . }}-headless + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . 
| nindent 6 }} + replicas: {{ .Values.server.replicaCount }} + podManagementPolicy: {{ .Values.server.statefulSet.podManagementPolicy }} + template: + metadata: + {{- if .Values.server.podAnnotations }} + annotations: + {{ toYaml .Values.server.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 8 }} + {{- if .Values.server.podLabels}} + {{ toYaml .Values.server.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.server.priorityClassName }} + priorityClassName: "{{ .Values.server.priorityClassName }}" +{{- end }} +{{- if .Values.server.schedulerName }} + schedulerName: "{{ .Values.server.schedulerName }}" +{{- end }} +{{- if semverCompare ">=1.13-0" .Capabilities.KubeVersion.GitVersion }} + {{- if or (.Values.server.enableServiceLinks) (eq (.Values.server.enableServiceLinks | toString) "") }} + enableServiceLinks: true + {{- else }} + enableServiceLinks: false + {{- end }} +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} + {{- if .Values.server.extraInitContainers }} + initContainers: +{{ toYaml .Values.server.extraInitContainers | indent 8 }} + {{- end }} + containers: + {{- if .Values.configmapReload.prometheus.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }} + image: "{{ include "get.cmreloadimage" .}}" + imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" + args: + - --volume-dir=/etc/config + - --webhook-url=http://127.0.0.1:9090{{ .Values.server.prefixURL }}/-/reload + {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraVolumeDirs }} + - --volume-dir={{ . 
}} + {{- end }} + resources: +{{ toYaml .Values.configmapReload.prometheus.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- end }} + + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }} + image: "{{ include "get.serverimage" .}}" + imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" + {{- if .Values.server.env }} + env: +{{ toYaml .Values.server.env | indent 12}} + {{- end }} + args: + {{- if .Values.server.prefixURL }} + - --web.route-prefix={{ .Values.server.prefixURL }} + {{- end }} + {{- if .Values.server.retention }} + - --storage.tsdb.retention.time={{ .Values.server.retention }} + {{- end }} + - --config.file={{ .Values.server.configPath }} + {{- if .Values.server.storagePath }} + - --storage.tsdb.path={{ .Values.server.storagePath }} + {{- else }} + - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} + {{- end }} + - --web.console.libraries=/etc/prometheus/console_libraries + - --web.console.templates=/etc/prometheus/consoles + {{- range .Values.server.extraFlags }} + - --{{ . 
}} + {{- end }} + {{- range $key, $value := .Values.server.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.server.baseURL }} + - --web.external-url={{ .Values.server.baseURL }} + {{- end }} + ports: + - containerPort: 9090 + readinessProbe: + httpGet: + path: {{ .Values.server.prefixURL }}/-/ready + port: 9090 + initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} + periodSeconds: {{ .Values.server.readinessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} + failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }} + successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }} + livenessProbe: + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} + periodSeconds: {{ .Values.server.livenessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} + failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} + successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} + resources: +{{ toYaml .Values.server.resources | indent 12 }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: {{ .Values.server.persistentVolume.mountPath }} + subPath: "{{ .Values.server.persistentVolume.subPath }}" + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.server.extraVolumeMounts }} + {{ toYaml 
.Values.server.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.server.sidecarContainers }} + {{- range $name, $spec := .Values.server.sidecarContainers }} + - name: {{ $name }} + {{- if kindIs "string" $spec }} + {{- tpl $spec $ | nindent 10 }} + {{- else }} + {{- toYaml $spec | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} + hostNetwork: {{ .Values.server.hostNetwork }} + {{- if .Values.server.dnsPolicy }} + dnsPolicy: {{ .Values.server.dnsPolicy }} + {{- end }} + {{- if (or .Values.global.imagePullSecret .Values.imagePullSecrets) }} + imagePullSecrets: + {{- if .Values.global.imagePullSecret }} + - name: {{ .Values.global.imagePullSecret }} + {{- end }} + {{- if .Values.imagePullSecrets }} +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.server.nodeSelector }} + nodeSelector: +{{ toYaml .Values.server.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.server.hostAliases }} + hostAliases: +{{ toYaml .Values.server.hostAliases | indent 8 }} + {{- end }} + {{- if .Values.server.dnsConfig }} + dnsConfig: +{{ toYaml .Values.server.dnsConfig | indent 8 }} + {{- end }} + {{- if .Values.server.securityContext }} + securityContext: +{{ toYaml .Values.server.securityContext | indent 8 }} + {{- end }} + {{- if .Values.server.tolerations }} + tolerations: +{{ toYaml .Values.server.tolerations | indent 8 }} + {{- end }} + {{- if .Values.server.affinity }} + affinity: +{{ toYaml .Values.server.affinity | indent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} + volumes: + - name: config-volume + configMap: + name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" .
}}{{- end }} + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} +{{- if .Values.server.extraVolumes }} +{{ toYaml .Values.server.extraVolumes | indent 8}} +{{- end }} +{{- if .Values.server.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: storage-volume + {{- if .Values.server.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.server.persistentVolume.annotations | indent 10 }} + {{- end }} + spec: + accessModes: +{{ toYaml .Values.server.persistentVolume.accessModes | indent 10 }} + resources: + requests: + storage: "{{ .Values.server.persistentVolume.size }}" + {{- if .Values.server.persistentVolume.storageClass }} + {{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" + {{- end }} + {{- else if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.global.persistence.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: storage-volume + emptyDir: + {{- if .Values.server.emptyDir.sizeLimit }} + 
sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/vpa.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/vpa.yaml new file mode 100644 index 000000000..981a9b485 --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/templates/server/vpa.yaml @@ -0,0 +1,24 @@ +{{- if .Values.server.enabled -}} +{{- if .Values.server.verticalAutoscaler.enabled -}} +apiVersion: autoscaling.k8s.io/v1beta2 +kind: VerticalPodAutoscaler +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . }}-vpa +{{ include "prometheus.namespace" . | indent 2 }} +spec: + targetRef: + apiVersion: "apps/v1" +{{- if .Values.server.statefulSet.enabled }} + kind: StatefulSet +{{- else }} + kind: Deployment +{{- end }} + name: {{ template "prometheus.server.fullname" . }} + updatePolicy: + updateMode: {{ .Values.server.verticalAutoscaler.updateMode | default "Off" | quote }} + resourcePolicy: + containerPolicies: {{ .Values.server.verticalAutoscaler.containerPolicies | default list | toYaml | trim | nindent 4 }} +{{- end -}} {{/* if .Values.server.verticalAutoscaler.enabled */}} +{{- end -}} {{/* .Values.server.enabled */}} diff --git a/charts/k10/k10/4.5.1400/charts/prometheus/values.yaml b/charts/k10/k10/4.5.1400/charts/prometheus/values.yaml new file mode 100644 index 000000000..2c33498ec --- /dev/null +++ b/charts/k10/k10/4.5.1400/charts/prometheus/values.yaml @@ -0,0 +1,1737 @@ +k10image: + registry: gcr.io + repository: kasten-images + +rbac: + create: true + +podSecurityPolicy: + enabled: false + +imagePullSecrets: +# - name: "image-pull-secret" + +## Define serviceAccount names for components. Defaults to component's fully qualified name. 
+## +serviceAccounts: + alertmanager: + create: true + name: + annotations: {} + nodeExporter: + create: true + name: + annotations: {} + pushgateway: + create: true + name: + annotations: {} + server: + create: true + name: + annotations: {} + +alertmanager: + ## If false, alertmanager will not be installed + ## + enabled: true + + ## Use a ClusterRole (and ClusterRoleBinding) + ## - If set to false - we define a Role and RoleBinding in the defined namespaces ONLY + ## This makes alertmanager work - for users who do not have ClusterAdmin privs, but wants alertmanager to operate on their own namespaces, instead of clusterwide. + useClusterRole: true + + ## Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. + useExistingRole: false + + ## alertmanager container name + ## + name: alertmanager + + ## alertmanager container image + ## + image: + repository: quay.io/prometheus/alertmanager + tag: v0.21.0 + pullPolicy: IfNotPresent + + ## alertmanager priorityClassName + ## + priorityClassName: "" + + ## Additional alertmanager container arguments + ## + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. + ## (Optional) + prefixURL: "" + + ## External URL which can access alertmanager + baseURL: "http://localhost:9093" + + ## Additional alertmanager container environment variable + ## For instance to add a http_proxy + ## + extraEnv: {} + + ## Additional alertmanager Secret mounts + # Defines additional mounts with secrets. Secrets must be manually created in the namespace. 
+ extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # subPath: "" + # secretName: alertmanager-secret-files + # readOnly: true + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/alertmanager-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ## The name of a secret in the same kubernetes namespace which contains the Alertmanager config + ## Defining configFromSecret will cause templates/alertmanager-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configFromSecret: "" + + ## The configuration file name to be loaded to alertmanager + ## Must match the key within configuration loaded from ConfigMap/Secret + ## + configFileName: alertmanager.yml + + ingress: + ## If true, alertmanager Ingress will be created + ## + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + ## alertmanager Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## alertmanager Ingress additional labels + ## + extraLabels: {} + + ## alertmanager Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - alertmanager.domain.com + # - domain.com/alertmanager + + path: / + + # pathType is only for k8s >= 1.18 + pathType: Prefix + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## alertmanager Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-alerts-tls + # hosts: + # - alertmanager.domain.com + + ## Alertmanager Deployment Strategy type + # strategy: + # type: Recreate + + ## Node tolerations for alertmanager scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for alertmanager pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Pod affinity + ## + affinity: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + persistentVolume: + ## If true, alertmanager will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## alertmanager data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## alertmanager data Persistent Volume Claim annotations + ## + annotations: {} + + ## alertmanager data Persistent Volume existing claim name + ## Requires alertmanager.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## alertmanager data Persistent Volume mount root path + ## + mountPath: /data + + ## alertmanager data Persistent Volume size + ## + size: 2Gi + + ## alertmanager data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## alertmanager data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. 
+ ## + # volumeBindingMode: "" + + ## Subdirectory of alertmanager data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + emptyDir: + ## alertmanager emptyDir volume size limit + ## + sizeLimit: "" + + ## Annotations to be added to alertmanager pods + ## + podAnnotations: {} + ## Tell prometheus to use a specific set of alertmanager pods + ## instead of all alertmanager pods found in the same namespace + ## Useful if you deploy multiple releases within the same namespace + ## + ## prometheus.io/probe: alertmanager-teamA + + ## Labels to be added to Prometheus AlertManager pods + ## + podLabels: {} + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) + ## + replicaCount: 1 + + ## Annotations to be added to deployment + ## + deploymentAnnotations: {} + + statefulSet: + ## If true, use a statefulset instead of a deployment for pod management. 
+ ## This allows to scale replicas to more than 1 pod + ## + enabled: false + + annotations: {} + labels: {} + podManagementPolicy: OrderedReady + + ## Alertmanager headless service to use for the statefulset + ## + headless: + annotations: {} + labels: {} + + ## Enabling peer mesh service end points for enabling the HA alert manager + ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md + enableMeshPeer: false + + servicePort: 80 + + ## alertmanager resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 32Mi + # requests: + # cpu: 10m + # memory: 32Mi + + # Custom DNS configuration to be added to alertmanager pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + + ## Security context to be added to alertmanager pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + + service: + annotations: {} + labels: {} + clusterIP: "" + + ## Enabling peer mesh service end points for enabling the HA alert manager + ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md + # enableMeshPeer : true + + ## List of IP addresses at which the alertmanager service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + # nodePort: 30000 + sessionAffinity: None + type: ClusterIP + +## Monitors ConfigMap changes and POSTs to a URL +## Ref: https://github.com/jimmidyson/configmap-reload +## +configmapReload: + prometheus: + ## If false, the configmap-reload container will not be deployed + ## + enabled: true + + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: 
jimmidyson/configmap-reload + tag: v0.5.0 + pullPolicy: IfNotPresent + + ## Additional configmap-reload container arguments + ## + extraArgs: {} + ## Additional configmap-reload volume directories + ## + extraVolumeDirs: [] + + + ## Additional configmap-reload mounts + ## + extraConfigmapMounts: [] + # - name: prometheus-alerts + # mountPath: /etc/alerts.d + # subPath: "" + # configMap: prometheus-alerts + # readOnly: true + + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + alertmanager: + ## If false, the configmap-reload container will not be deployed + ## + enabled: true + + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: jimmidyson/configmap-reload + tag: v0.5.0 + pullPolicy: IfNotPresent + + ## Additional configmap-reload container arguments + ## + extraArgs: {} + ## Additional configmap-reload volume directories + ## + extraVolumeDirs: [] + + + ## Additional configmap-reload mounts + ## + extraConfigmapMounts: [] + # - name: prometheus-alerts + # mountPath: /etc/alerts.d + # subPath: "" + # configMap: prometheus-alerts + # readOnly: true + + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + +kubeStateMetrics: + ## If false, kube-state-metrics sub-chart will not be installed + ## + enabled: true + +## kube-state-metrics sub-chart configurable values +## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics +## +# kube-state-metrics: + +nodeExporter: + ## If false, node-exporter will not be installed + ## + enabled: true + + ## If true, node-exporter pods share the host network namespace + ## + hostNetwork: true + + ## If true, node-exporter pods share the host PID namespace + ## + hostPID: true + + ## If true, node-exporter pods mounts host / at 
/host/root + ## + hostRootfs: true + + ## node-exporter container name + ## + name: node-exporter + + ## node-exporter container image + ## + image: + repository: quay.io/prometheus/node-exporter + tag: v1.1.2 + pullPolicy: IfNotPresent + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## node-exporter priorityClassName + ## + priorityClassName: "" + + ## Custom Update Strategy + ## + updateStrategy: + type: RollingUpdate + + ## Additional node-exporter container arguments + ## + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ## Additional node-exporter hostPath mounts + ## + extraHostPathMounts: [] + # - name: textfile-dir + # mountPath: /srv/txt_collector + # hostPath: /var/lib/node-exporter + # readOnly: true + # mountPropagation: HostToContainer + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /prometheus + # configMap: certs-configmap + # readOnly: true + + ## Node tolerations for node-exporter scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for node-exporter pod assignment + ## Ref: 
https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to node-exporter pods + ## + podAnnotations: {} + + ## Labels to be added to node-exporter pods + ## + pod: + labels: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## node-exporter resource limits & requests + ## Ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 200m + # memory: 50Mi + # requests: + # cpu: 100m + # memory: 30Mi + + # Custom DNS configuration to be added to node-exporter pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + + ## Security context to be added to node-exporter pods + ## + securityContext: + fsGroup: 65534 + runAsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + + service: + annotations: + prometheus.io/scrape: "true" + labels: {} + + # Exposed as a headless service: + # https://kubernetes.io/docs/concepts/services-networking/service/#headless-services + clusterIP: None + + ## List of IP addresses at which the node-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + hostPort: 9100 + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9100 + type: ClusterIP + +server: + ## Prometheus server container name + ## + enabled: true + + ## Use a ClusterRole (and ClusterRoleBinding) + ## - If set to false - we define a RoleBinding in the defined namespaces ONLY + ## + ## NB: because we need a Role with nonResourceURL's ("/metrics") - you must get someone with Cluster-admin privileges to define this role for you, before running with this setting enabled. 
+ ## This makes prometheus work - for users who do not have ClusterAdmin privs, but want prometheus to operate on their own namespaces, instead of clusterwide. + ## + ## You MUST also set namespaces to the ones you have access to and want monitored by Prometheus. + ## + # useExistingClusterRoleName: nameofclusterrole + + ## namespaces to monitor (instead of monitoring all - clusterwide). Needed if you want to run without Cluster-admin privileges. + # namespaces: + # - yournamespace + + name: server + + # sidecarContainers - add more containers to prometheus server + # Key/Value where Key is the sidecar `- name: ` + # Example: + # sidecarContainers: + # webserver: + # image: nginx + sidecarContainers: {} + + # sidecarTemplateValues - context to be used in template for sidecarContainers + # Example: + # sidecarTemplateValues: *your-custom-globals + # sidecarContainers: + # webserver: |- + # {{ include "webserver-container-template" . }} + # Template for `webserver-container-template` might look like this: + # image: "{{ .Values.server.sidecarTemplateValues.repository }}:{{ .Values.server.sidecarTemplateValues.tag }}" + # ... + # + sidecarTemplateValues: {} + + ## Prometheus server container image + ## + image: + repository: quay.io/prometheus/prometheus + tag: v2.26.0 + pullPolicy: IfNotPresent + + ## prometheus server priorityClassName + ## + priorityClassName: "" + + ## EnableServiceLinks indicates whether information about services should be injected + ## into pod's environment variables, matching the syntax of Docker links. + ## WARNING: the field is unsupported and will be skipped in K8s prior to v1.13.0. + ## + enableServiceLinks: true + + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. 
+ ## (Optional) + prefixURL: "" + + ## External URL which can access prometheus + ## Maybe same with Ingress host name + baseURL: "" + + ## Additional server container environment variables + ## + ## You specify this manually like you would a raw deployment manifest. + ## This means you can bind in environment variables from secrets. + ## + ## e.g. static environment variable: + ## - name: DEMO_GREETING + ## value: "Hello from the environment" + ## + ## e.g. secret environment variable: + ## - name: USERNAME + ## valueFrom: + ## secretKeyRef: + ## name: mysecret + ## key: username + env: [] + + extraFlags: + - web.enable-lifecycle + ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as + ## deleting time series. This is disabled by default. + # - web.enable-admin-api + ## + ## storage.tsdb.no-lockfile flag controls BD locking + # - storage.tsdb.no-lockfile + ## + ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL) + # - storage.tsdb.wal-compression + + ## Path to a configuration file on prometheus server container FS + configPath: /etc/config/prometheus.yml + + ### The data directory used by prometheus to set --storage.tsdb.path + ### When empty server.persistentVolume.mountPath is used instead + storagePath: "" + + global: + ## How frequently to scrape targets by default + ## + scrape_interval: 1m + ## How long until a scrape request times out + ## + scrape_timeout: 10s + ## How frequently to evaluate rules + ## + evaluation_interval: 1m + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write + ## + remoteWrite: [] + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read + ## + remoteRead: [] + + ## Additional Prometheus server container arguments + ## + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ## Additional Prometheus server Volume mounts + 
## + extraVolumeMounts: [] + + ## Additional Prometheus server Volumes + ## + extraVolumes: [] + + ## Additional Prometheus server hostPath mounts + ## + extraHostPathMounts: [] + # - name: certs-dir + # mountPath: /etc/kubernetes/certs + # subPath: "" + # hostPath: /etc/kubernetes/certs + # readOnly: true + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /prometheus + # subPath: "" + # configMap: certs-configmap + # readOnly: true + + ## Additional Prometheus server Secret mounts + # Defines additional mounts with secrets. Secrets must be manually created in the namespace. + extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # subPath: "" + # secretName: prom-secret-files + # readOnly: true + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/server-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + ## Prometheus server Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## Prometheus server Ingress additional labels + ## + extraLabels: {} + + ## Prometheus server Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - prometheus.domain.com + # - domain.com/prometheus + + path: / + + # pathType is only for k8s >= 1.18 + pathType: Prefix + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-server-tls + # hosts: + # - prometheus.domain.com + + ## Server Deployment Strategy type + # strategy: + # type: Recreate + + ## hostAliases allows adding entries to /etc/hosts inside the containers + hostAliases: [] + # - ip: "127.0.0.1" + # hostnames: + # - "example.com" + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for Prometheus server pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Pod affinity + ## + affinity: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + persistentVolume: + ## If true, Prometheus server will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## Prometheus server data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## Prometheus server data Persistent Volume annotations + ## + annotations: {} + + ## Prometheus server data Persistent Volume existing claim name + ## Requires server.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## Prometheus server data Persistent Volume mount root path + ## + mountPath: /data + + ## Prometheus server data Persistent Volume size + ## + size: 8Gi + + ## Prometheus server data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Prometheus server data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. 
+ ## + # volumeBindingMode: "" + + ## Subdirectory of Prometheus server data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + emptyDir: + ## Prometheus server emptyDir volume size limit + ## + sizeLimit: "" + + ## Annotations to be added to Prometheus server pods + ## + podAnnotations: {} + # iam.amazonaws.com/role: prometheus + + ## Labels to be added to Prometheus server pods + ## + podLabels: {} + + ## Prometheus AlertManager configuration + ## + alertmanagers: [] + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) + ## + replicaCount: 1 + + ## Annotations to be added to deployment + ## + deploymentAnnotations: {} + + statefulSet: + ## If true, use a statefulset instead of a deployment for pod management. 
+ ## This allows scaling replicas to more than 1 pod + ## + enabled: false + + annotations: {} + labels: {} + podManagementPolicy: OrderedReady + + ## Alertmanager headless service to use for the statefulset + ## + headless: + annotations: {} + labels: {} + servicePort: 80 + ## Enable gRPC port on service to allow auto discovery with thanos-querier + gRPC: + enabled: false + servicePort: 10901 + # nodePort: 10901 + + ## Prometheus server readiness and liveness probe initial delay and timeout + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + ## + readinessProbeInitialDelay: 30 + readinessProbePeriodSeconds: 5 + readinessProbeTimeout: 4 + readinessProbeFailureThreshold: 3 + readinessProbeSuccessThreshold: 1 + livenessProbeInitialDelay: 30 + livenessProbePeriodSeconds: 15 + livenessProbeTimeout: 10 + livenessProbeFailureThreshold: 3 + livenessProbeSuccessThreshold: 1 + + ## Prometheus server resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 500m + # memory: 512Mi + + # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), + # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working + ## + hostNetwork: false + + # When hostNetwork is enabled, you probably want to set this to ClusterFirstWithHostNet + dnsPolicy: ClusterFirst + + ## Vertical Pod Autoscaler config + ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler + verticalAutoscaler: + ## If true a VPA object will be created for the controller (either StatefulSet or Deployment, based on above configs) + enabled: false + # updateMode: "Auto" + # containerPolicies: + # - containerName: 'prometheus-server' + + # Custom DNS configuration to be added to prometheus server pods + dnsConfig: {} + 
# nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + ## Security context to be added to server pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + + service: + annotations: {} + labels: {} + clusterIP: "" + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + sessionAffinity: None + type: ClusterIP + + ## Enable gRPC port on service to allow auto discovery with thanos-querier + gRPC: + enabled: false + servicePort: 10901 + # nodePort: 10901 + + ## If using a statefulSet (statefulSet.enabled=true), configure the + ## service to connect to a specific replica to have a consistent view + ## of the data. + statefulsetReplica: + enabled: false + replica: 0 + + ## Prometheus server pod termination grace period + ## + terminationGracePeriodSeconds: 300 + + ## Prometheus data retention period (default if not specified is 15 days) + ## + retention: "15d" + +pushgateway: + ## If false, pushgateway will not be installed + ## + enabled: true + + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## pushgateway container name + ## + name: pushgateway + + ## pushgateway container image + ## + image: + repository: prom/pushgateway + tag: v1.3.1 + pullPolicy: IfNotPresent + + ## pushgateway priorityClassName + ## + priorityClassName: "" + + ## Additional pushgateway container arguments + ## + ## for example: persistence.file: /data/pushgateway.data + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ingress: + ## If true, pushgateway Ingress will be created + ## + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + ## pushgateway Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## pushgateway Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - pushgateway.domain.com + # - domain.com/pushgateway + + path: / + + # pathType is only for k8s >= 1.18 + pathType: Prefix + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## pushgateway Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-alerts-tls + # hosts: + # - pushgateway.domain.com + + ## Node tolerations for pushgateway scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for pushgateway pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Annotations to be added to pushgateway pods + ## + podAnnotations: {} + + ## Labels to be added to pushgateway pods + ## + podLabels: {} + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + replicaCount: 1 + + ## Annotations to be added to deployment + ## + deploymentAnnotations: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## pushgateway resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + 
# cpu: 10m + # memory: 32Mi + # requests: + # cpu: 10m + # memory: 32Mi + + # Custom DNS configuration to be added to push-gateway pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + + ## Security context to be added to push-gateway pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + + service: + annotations: + prometheus.io/probe: pushgateway + labels: {} + clusterIP: "" + + ## List of IP addresses at which the pushgateway service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9091 + type: ClusterIP + + ## pushgateway Deployment Strategy type + # strategy: + # type: Recreate + + persistentVolume: + ## If true, pushgateway will create/use a Persistent Volume Claim + ## + enabled: false + + ## pushgateway data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## pushgateway data Persistent Volume Claim annotations + ## + annotations: {} + + ## pushgateway data Persistent Volume existing claim name + ## Requires pushgateway.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## pushgateway data Persistent Volume mount root path + ## + mountPath: /data + + ## pushgateway data Persistent Volume size + ## + size: 2Gi + + ## pushgateway data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## pushgateway data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. + ## + # volumeBindingMode: "" + + ## Subdirectory of pushgateway data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + +## alertmanager ConfigMap entries +## +alertmanagerFiles: + alertmanager.yml: + global: {} + # slack_api_url: '' + + receivers: + - name: default-receiver + # slack_configs: + # - channel: '@you' + # send_resolved: true + + route: + group_wait: 10s + group_interval: 5m + receiver: default-receiver + repeat_interval: 3h + +## Prometheus server ConfigMap entries +## +serverFiles: + + ## Alerts configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + alerting_rules.yml: {} + # groups: + # - name: Instances + # rules: + # - alert: InstanceDown + # expr: up == 0 + # for: 5m + # labels: + # severity: page + # annotations: + # description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.' 
+ # summary: 'Instance {{ $labels.instance }} down' + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use alerting_rules.yml + alerts: {} + + ## Records configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/ + recording_rules.yml: {} + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use recording_rules.yml + rules: {} + + prometheus.yml: + rule_files: + - /etc/config/recording_rules.yml + - /etc/config/alerting_rules.yml + ## Below two files are DEPRECATED will be removed from this default values file + - /etc/config/rules + - /etc/config/alerts + + scrape_configs: + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + + # Scrape config for API servers. + # + # Kubernetes exposes API servers as endpoints to the default/kubernetes + # service so this uses `endpoints` role and uses relabelling to only keep + # the endpoints associated with the default/kubernetes service using the + # default named port `https`. This works for single API server deployments as + # well as HA API server deployments. + - job_name: 'kubernetes-apiservers' + + kubernetes_sd_configs: + - role: endpoints + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. 
Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + # Keep only the default/kubernetes service endpoints for the https port. This + # will add targets for each API server which Kubernetes adds an endpoint to + # the default/kubernetes service. + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;kubernetes;https + + - job_name: 'kubernetes-nodes' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. 
+ # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics + + + - job_name: 'kubernetes-nodes-cadvisor' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. 
+ # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + # This configuration will work only on kubelet 1.7.3+ + # As the scrape endpoints for cAdvisor have changed + # if you are using older version you need to change the replacement to + # replacement: /api/v1/nodes/$1:4194/proxy/metrics + # more info here https://github.com/coreos/prometheus-operator/issues/633 + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + + # Scrape config for service endpoints. + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape`: Only scrape services that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + - job_name: 'kubernetes-service-endpoints' + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) 
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: kubernetes_node + + # Scrape config for slow service endpoints; same as above, but with a larger + # timeout and a larger interval + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + - job_name: 'kubernetes-service-endpoints-slow' + + scrape_interval: 5m + scrape_timeout: 30s + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) 
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: kubernetes_node + + - job_name: 'prometheus-pushgateway' + honor_labels: true + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: pushgateway + + # Example scrape config for probing services via the Blackbox Exporter. 
+ # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/probe`: Only probe services that have a value of `true` + - job_name: 'kubernetes-services' + + metrics_path: /probe + params: + module: [http_2xx] + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: true + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: blackbox + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: kubernetes_name + + # Example scrape config for pods + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape`: Only scrape pods that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. + - job_name: 'kubernetes-pods' + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] + action: replace + regex: (https?) 
+ target_label: __scheme__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: kubernetes_pod_name + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed|Completed + action: drop + + # Example Scrape config for pods which should be scraped slower. An useful example + # would be stackriver-exporter which queries an API on every scrape of the pod + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape-slow`: Only scrape pods that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. + - job_name: 'kubernetes-pods-slow' + + scrape_interval: 5m + scrape_timeout: 30s + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] + action: replace + regex: (https?) 
+ target_label: __scheme__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: kubernetes_pod_name + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed|Completed + action: drop + +# adds additional scrape configs to prometheus.yml +# must be a string so you have to add a | after extraScrapeConfigs: +# example adds prometheus-blackbox-exporter scrape config +extraScrapeConfigs: + # - job_name: 'prometheus-blackbox-exporter' + # metrics_path: /probe + # params: + # module: [http_2xx] + # static_configs: + # - targets: + # - https://example.com + # relabel_configs: + # - source_labels: [__address__] + # target_label: __param_target + # - source_labels: [__param_target] + # target_label: instance + # - target_label: __address__ + # replacement: prometheus-blackbox-exporter:9115 + +# Adds option to add alert_relabel_configs to avoid duplicate alerts in alertmanager +# useful in H/A prometheus with different external labels but the same alerts +alertRelabelConfigs: + # alert_relabel_configs: + # - source_labels: [dc] + # regex: (.+)\d+ + # target_label: dc + +networkPolicy: + ## Enable creation of NetworkPolicy resources. 
+ ## + enabled: false + +# Force namespace of namespaced resources +forceNamespace: null diff --git a/charts/k10/k10/4.5.1400/config.json b/charts/k10/k10/4.5.1400/config.json new file mode 100644 index 000000000..e69de29bb diff --git a/charts/k10/k10/4.5.1400/eula.txt b/charts/k10/k10/4.5.1400/eula.txt new file mode 100644 index 000000000..4e44a6b61 --- /dev/null +++ b/charts/k10/k10/4.5.1400/eula.txt @@ -0,0 +1,459 @@ +KASTEN END USER LICENSE AGREEMENT + +This End User License Agreement is a binding agreement between Kasten, Inc., a +Delaware Corporation ("Kasten"), and you ("Licensee"), and establishes the terms +under which Licensee may use the Software and Documentation (as defined below), +including without limitation terms and conditions relating to license grant, +intellectual property rights, disclaimers /exclusions / limitations of warranty, +indemnity and liability, governing law and limitation periods. All components +collectively are referred to herein as the "Agreement." + +LICENSEE ACKNOWLEDGES IT HAS HAD THE OPPORTUNITY TO REVIEW THE AGREEMENT, PRIOR +TO ACCEPTANCE OF THIS AGREEMENT. LICENSEE'S ACCEPTANCE OF THIS AGREEMENT IS +EVIDENCED BY LICENSEE'S DOWNLOADING, COPYING, INSTALLING OR USING THE KASTEN +SOFTWARE. IF YOU ARE ACTING ON BEHALF OF A COMPANY, YOU REPRESENT THAT YOU ARE +AUTHORIZED TO BIND THE COMPANY. IF YOU DO NOT AGREE TO ALL TERMS OF THIS +AGREEMENT, DO NOT DOWNLOAD, COPY, INSTALL, OR USE THE SOFTWARE, AND PERMANENTLY +DELETE THE SOFTWARE. + +1. DEFINITIONS + +1.1 "Authorized Persons" means trained technical employees and contractors of +Licensee who are subject to a written agreement with Licensee that includes use +and confidentiality restrictions that are at least as protective as those set +forth in this Agreement. 
+ +1.2 "Authorized Reseller" means a distributor or reseller, including cloud +computing platform providers, authorized by Kasten to resell licenses to the +Software through the channel through or in the territory in which Licensee is +purchasing. + +1.3 "Confidential Information" means all non-public information disclosed in +written, oral or visual form by either party to the other. Confidential +Information may include, but is not limited to, services, pricing information, +computer programs, source code, names and expertise of employees and +consultants, know-how, and other technical, business, financial and product +development information. "Confidential Information" does not include any +information that the receiving party can demonstrate by its written records (1) +was rightfully known to it without obligation of confidentiality prior to its +disclosure hereunder by the disclosing party; (2) is or becomes publicly known +through no wrongful act of the receiving party; (3) has been rightfully received +without obligation of confidentiality from a third party authorized to make such +a disclosure; or (4) is independently developed by the receiving party without +reference to confidential information disclosed hereunder. + +1.4 "Documentation" means any administration guides, installation and user +guides, and release notes that are provided by Kasten to Licensee with the +Software. + +1.5 "Intellectual Property Rights" means patents, design patents, copyrights, +trademarks, Confidential Information, know-how, trade secrets, moral rights, and +any other intellectual property rights recognized in any country or jurisdiction +in the world. + +1.6 "Node" means a single physical or virtual computing machine recognizable by +the Software as a unique device. Nodes must be owned or leased by Licensee or an +entity controlled by, controlling or under common control with Licensee. 
+ +1.7 "Edition" means a unique identifier for each distinct product that is made +available by Kasten and that can be licensed, including summary information +regarding any associated functionality, features, or restrictions specific to +the Edition. + +1.8 "Open Source Software" means software delivered to Licensee hereunder that +is subject to the provisions of any open source license agreement. + +1.9 "Purchase Agreement" means a separate commercial agreement, if applicable, +between Kasten and the Licensee that contains the terms for the licensing of a +specific Edition of the Software. + +1.10 "Software" means any and all software product Editions licensed to Licensee +under this Agreement, all as developed by Kasten and delivered to Licensee +hereunder. Software also includes any Updates provided by Kasten to Licensee. +For the avoidance of doubt, the definition of Software shall exclude any +Third-Party Software and Open Source Software. + +1.11 "Third-Party Software" means certain software Kasten licenses from third +parties and provides to Licensee with the Software, which may include Open +Source Software. + +1.12 "Update" means a revision of the Software that Kasten makes available to +customers at no additional cost. The Update includes, if and when applicable and +available, bug fix patches, maintenance release, minor release, or new major +releases. Updates are limited only to the Software licensed by Licensee, and +specifically exclude new product offerings, features, options or functionality +of the Software that Kasten may choose to license separately, or for an +additional fee. + +1.13 "Use" means to install activate the processing capabilities of the +Software, load, execute, access, employ the Software, or display information +resulting from such capabilities. + + +2. LICENSE GRANT AND RESTRICTIONS + +2.1 Enterprise License. 
Subject to Licensee"s compliance with the terms and +conditions of this Agreement (including any additional restrictions on +Licensee"s use of the Software set forth in the Purchase Agreement, if one +exists, between Licensee and Kasten), Kasten grants to Licensee a non-exclusive, +non-transferable (except in connection with a permitted assignment of this +Agreement under Section 14.10 (Assignment), non-sublicensable, limited term +license to install and use the Software, in object code form only, solely for +Licensee"s use, unless terminated in accordance with Section 4 (Term and +Termination). + +2.2 Starter License. This section shall only apply when the Licensee licenses +Starter Edition of the Software. The license granted herein is for a maximum of +10 Nodes and for a period of 12 months from the date of the Software release that +embeds the specific license instance. Updating to a newer Software (minor or +major) release will always extend the validity of the license by 12 months. If +the Licensee wishes to upgrade to an Enterprise License instead, the Licensee +will have to enter into a Purchase Agreement with Kasten which will supersede +this Agreement. The Licensee is required to provide accurate email and company +information, if representing a company, when accepting this Agreement. Under no +circumstances will a Starter License be construed to mean that the Licensee is +authorized to distribute the Software to any third party for any reason +whatsoever. + +2.3 Evaluation License. This section shall only apply when the Licensee has +licensed the Software for an initial evaluation period. The license granted +herein is valid only one time 30 days, starting from date of installation, +unless otherwise explicitly designated by Kasten ("Evaluation Period"). Under +this license the Software can only be used for evaluation purposes. 
Under no +circumstances will an Evaluation License be construed to mean that the Licensee +is authorized to distribute the Software to any third party for any reason +whatsoever. If the Licensee wishes to upgrade to an Enterprise License instead, +the Licensee will have to enter into a Purchase Agreement with Kasten which will +supersede this Agreement.. If the Licensee does not wish to upgrade to an +Enterprise License at the end of the Evaluation Period the Licensee"s rights +under the Agreement shall terminate, and the Licensee shall delete all Kasten +Software. + +2.4 License Restrictions. Except to the extent permitted under this Agreement, +Licensee will not nor will Licensee allow any third party to: (i) copy, modify, +adapt, translate or otherwise create derivative works of the Software or the +Documentation; (ii) reverse engineer, decompile, disassemble or otherwise +attempt to discover the source code of the Software; (iii) rent, lease, sell, +assign or otherwise transfer rights in or to the Software or Documentation; (iv) +remove any proprietary notices or labels from the Software or Documentation; (v) +publicly disseminate performance information or analysis (including, without +limitation, benchmarks) relating to the Software. Licensee will comply with all +applicable laws and regulations in Licensee"s use of and access to the Software +and Documentation. + +2.5 Responsibility for Use. The Software and Documentation may be used only by +Authorized Persons and in conformance with this Agreement. 
Licensee shall be +responsible for the proper use and protection of the Software and Documentation +and is responsible for: (i) installing, managing, operating, and physically +controlling the Software and the results obtained from using the Software; (ii) +using the Software within the operating environment specified in the +Documentation; and; (iii) establishing and maintaining such recovery and data +protection and security procedures as necessary for Licensee's service and +operation and/or as may be specified by Kasten from time to time. + +2.6 United States Government Users. The Software licensed under this Agreement +is "commercial computer software" as that term is described in DFAR +252.227-7014(a)(1). If acquired by or on behalf of a civilian agency, the U.S. +Government acquires this commercial computer software and/or commercial computer +software documentation subject to the terms and this Agreement as specified in +48 C.F.R. 12.212 (Computer Software) and 12.211 (Technical Data) of the Federal +Acquisition Regulations ("FAR") and its successors. If acquired by or on behalf +of any agency within the Department of Defense ("DOD"), the U.S. Government +acquires this commercial computer software and/or commercial computer software +documentation subject to the terms of this Agreement as specified in 48 C.F.R. +227.7202 of the DOD FAR Supplement and its successors. + + +3. SUPPORT + +3.1 During the Term (as defined below) and subject to Licensee’s compliance +with the terms and conditions of this Agreement, Licensee may submit queries and +requests for support for Enterprise Licenses by submitting Service Requests via Veeam +Support Portal (https://my.veeam.com). Support is not provided for Starter and Evaluation +Licenses. 
Licensee shall be entitled to the support service-level agreement specified +in the relevant order form or purchase order (“Order Form”) between Licensee and the +Reseller and as set forth in Kasten’s Support Policy, a copy of which can be found +at https://www.kasten.io/support-services-policy. Licensee shall also be permitted to +download and install all Updates released by Kasten during the Term and made generally +available to users of the Software. Software versions with all updates and upgrades +installed is supported for six months from the date of release of that version. + +3.2 Starter Edition Support. If the Licensee has licensed Starter Edition of +the Software, you will have access to the Kasten K10 Support Community +(https://community.veeam.com/groups/kasten-k10-support-92), but Kasten cannot guarantee +a service level of any sort. Should a higher level of support be needed, Licensee has +the option to consider entering into a Purchase Agreement with Kasten for licensing a +different Edition of the Software. + + + +4. TERM AND TERMINATION + +4.1 Term. The term of this Agreement, except for Starter and Evaluation +Licenses, shall commence on the Effective Date and shall, unless terminated +earlier in accordance with the provisions of Section 4.2 below, remain in force +for the Subscription Period as set forth in the applicable Order Form(s) (the +"Term"). The parties may extend the Term of this Agreement beyond the +Subscription Period by executing additional Order Form(s) and Licensee"s payment +of additional licensing fees. The term of this Agreement for the Starter and +Evaluation Licenses will coincide with the term for Starter Edition (as stated +in section 2.2) and the term for Evaluation Period (as stated in section 2.3), +respectively + +4.2 Termination. 
+ Either party may immediately terminate this + Agreement and the licenses granted hereunder if the other party (1) becomes + insolvent and becomes unwilling or unable to meet its obligations under this + Agreement, (2) files a petition in bankruptcy, (3) is subject to the filing of + an involuntary petition for bankruptcy which is not rescinded within a period of + forty-five (45) days, (4) fails to cure a material breach of any material term + or condition of this Agreement within thirty (30) days of receipt of written + notice specifying such breach, or (5) materially breaches its obligations of + confidentiality hereunder. + + 4.3 Effects of Termination. Upon expiration or + termination of this Agreement for any reason, (i) any amounts owed to Kasten + under this Agreement will be immediately due and payable; (ii) all licensed + rights granted in this Agreement will immediately cease; and (iii) Licensee will + promptly discontinue all use of the Software and Documentation and return to + Kasten any Kasten Confidential Information in Licensee's possession or control. + + 4.4 Survival. The following Sections of this Agreement will remain in effect + following the expiration or termination of these General Terms for any reason: + 4.3 (Effects of Termination), 4.4 (Survival), 5 (Third Party Software) 5 + (Confidentiality), 9 (Ownership), 10.2 (Third-Party Software), 10.3 (Warranty + Disclaimer), 11 (Limitations of Liability), 12.2 (Exceptions to Kasten + Obligation), 13 (Export) and 14 (General). + + + 5. THIRD PARTY AND OPEN SOURCE SOFTWARE Certain Third-Party Software or Open +Source Software (Kasten can provide a list upon request) that may be provided +with the Software may be subject to various other terms and conditions imposed +by the licensors of such Third-Party Software or Open Source Software. 
The +terms of Licensee"s use of the Third-Party Software or Open Source Software is +subject to and governed by the respective Third-Party Software and Open Source +licenses, except that this Section 5 (Third-Party Software), Section 10.2 (Third +Party Software), 10.3 (Warranty Disclaimer), Section 11 (Limitations of +Liability), and Section 14 (General) of this Agreement also govern Licensee"s +use of the Third-Party Software. To the extent applicable to Licensee"s use of +such Third-Party Software and Open Source, Licensee agrees to comply with the +terms and conditions contained in all such Third-Party Software and Open Source +licenses. + + +6. CONFIDENTIALITY Neither party will use any Confidential Information of the +other party except as expressly permitted by this Agreement or as expressly +authorized in writing by the disclosing party. The receiving party shall use +the same degree of care to protect the disclosing party"s Confidential +Information as it uses to protect its own Confidential Information of like +nature, but in no circumstances less than a commercially reasonable standard of +care. The receiving party may not disclose the disclosing party"s Confidential +Information to any person or entity other than to (i) (a) Authorized Persons in +the case the receiving party is Licensee, and (b) Kasten"s employees and +contractors in the case the receiving party is Kasten, and (ii) who need access +to such Confidential Information solely for the purpose of fulfilling that +party"s obligations or exercising that party"s rights hereunder. 
The foregoing +obligations will not restrict the receiving party from disclosing Confidential +Information of the disclosing party: (1) pursuant to the order or requirement of +a court, administrative agency, or other governmental body, provided that the +receiving party required to make such a disclosure gives reasonable notice to +the disclosing party prior to such disclosure; and (2) on a confidential basis +to its legal and financial advisors. Kasten may identify Licensee in its +customer lists in online and print marketing materials. + + +7. FEES Fees for Enterprise License shall be set forth in separate Order Form(s) +attached to a Purchase Agreement, between the Licensee and Kasten. + +If Licensee has obtained the Software through an Authorized Reseller, fees for +licensing shall be invoiced directly by the Authorized Reseller. + +If no Purchase Agreement exists, during the term of this Agreement, Kasten +shall license the Starter Edition only and no other Edition of the Software +"at no charge" to Licensee. + + +8. USAGE DATA Kasten may collect, accumulate, and aggregate certain usage +statistics in order to analyze usage of the Software, make improvements, and +potentially develop new products. Kasten may use aggregated anonymized data for +any purpose that Kasten, at its own discretion, may consider appropriate. + + +9. OWNERSHIP As between Kasten and Licensee, all right, title and interest in +the Software, Documentation and any other Kasten materials furnished or made +available hereunder, all modifications and enhancements thereof, and all +suggestions, ideas and feedback proposed by Licensee regarding the Software and +Documentation, including all copyright rights, patent rights and other +Intellectual Property Rights in each of the foregoing, belong to and are +retained solely by Kasten or Kasten"s licensors and providers, as applicable. 
+Licensee hereby does and will irrevocably assign to Kasten all evaluations, +ideas, feedback and suggestions made by Licensee to Kasten regarding the +Software and Documentation (collectively, "Feedback") and all Intellectual +Property Rights in and to the Feedback. Except as expressly provided herein, no +licenses of any kind are granted hereunder, whether by implication, estoppel, or +otherwise. + + +10. LIMITED WARRANTY AND DISCLAIMERS + +10.1 Limited Warranty. Kasten warrants for a period of thirty (30) days from +the Effective Date that the Software will materially conform to Kasten"s +then-current Documentation (the "Warranty Period") when properly installed on a +computer for which a license is granted hereunder. Licensee"s exclusive remedy +for a breach of this Section 10.1 is that Kasten shall, at its option, use +commercially reasonable efforts to correct or replace the Software, or refund +all or a portion of the fees paid by Licensee pursuant to the Purchase +Agreement. Kasten, in its sole discretion, may revise this limited warranty from +time to time. + +10.2 Third-Party Software. Except as expressly set forth in this Agreement, +Third-Party Software (including any Open Source Software) are provided on an +"as-is" basis at the sole risk of Licensee. Notwithstanding any language to the +contrary in this Agreement, Kasten makes no express or implied warranties of any +kind with respect to Third-Party Software provided to Licensee and shall not be +liable for any damages regarding the use or operation of the Third-Party +Software furnished under this Agreement. Any and all express or implied +warranties, if any, arising from the license of Third-Party Software shall be +those warranties running from the third party manufacturer or licensor to +Licensee. + +10.3 Warranty Disclaimer. 
EXCEPT FOR THE LIMITED WARRANTY PROVIDED ABOVE, +KASTEN AND ITS SUPPLIERS MAKE NO WARRANTY OF ANY KIND, WHETHER EXPRESS, IMPLIED, +STATUTORY OR OTHERWISE, RELATING TO THE SOFTWARE OR TO KASTEN"S MAINTENANCE, +PROFESSIONAL OR OTHER SERVICES. KASTEN SPECIFICALLY DISCLAIMS ALL IMPLIED +WARRANTIES OF DESIGN, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE +AND NON-INFRINGEMENT. KASTEN AND ITS SUPPLIERS AND LICENSORS DO NOT WARRANT OR +REPRESENT THAT THE SOFTWARE WILL BE FREE FROM BUGS OR THAT ITS USE WILL BE +UNINTERRUPTED OR ERROR-FREE. THIS DISCLAIMER SHALL APPLY NOTWITHSTANDING THE +FAILURE OF THE ESSENTIAL PURPOSE OF ANY LIMITED REMEDY PROVIDED HEREIN. EXCEPT +AS STATED ABOVE, KASTEN AND ITS SUPPLIERS PROVIDE THE SOFTWARE ON AN "AS IS" +BASIS. KASTEN PROVIDES NO WARRANTIES WITH RESPECT TO THIRD PARTY SOFTWARE AND +OPEN SOURCE SOFTWARE. + + +11. LIMITATIONS OF LIABILITY + +11.1 EXCLUSION OF CERTAIN DAMAGES. EXCEPT FOR BREACHES OF SECTION 6 +(CONFIDENTIALITY) OR SECTION 9 (OWNERSHIP), IN NO EVENT WILL EITHER PARTY BE +LIABLE FOR ANY INDIRECT, CONSEQUENTIAL, EXEMPLARY, SPECIAL, INCIDENTAL OR +RELIANCE DAMAGES, INCLUDING ANY LOST DATA, LOSS OF USE AND LOST PROFITS, ARISING +FROM OR RELATING TO THIS AGREEMENT, THE SOFTWARE OR DOCUMENTATION, EVEN IF SUCH +PARTY KNEW OR SHOULD HAVE KNOWN OF THE POSSIBILITY OF, OR COULD REASONABLY HAVE +PREVENTED, SUCH DAMAGES. + +11.2 LIMITATION OF DAMAGES. EXCEPT FOR THE BREACHES OF SECTION 6 +(CONFIDENTIALITY) OR SECTION 9 (OWNERSHIP), EACH PARTY"S TOTAL CUMULATIVE +LIABILITY ARISING FROM OR RELATED TO THIS AGREEMENT OR THE SOFTWARE, +DOCUMENTATION, OR SERVICES PROVIDED BY KASTEN, WILL NOT EXCEED THE AMOUNT OF +FEES PAID OR PAYABLE BY LICENSEE FOR THE SOFTWARE, DOCUMENTATION OR SERVICES +GIVING RISE TO THE CLAIM IN THE TWELVE (12) MONTHS FOLLOWING THE EFFECTIVE DATE. +LICENSEE AGREES THAT KASTEN"S SUPPLIERS AND LICENSORS WILL HAVE NO LIABILITY OF +ANY KIND UNDER OR AS A RESULT OF THIS AGREEMENT. 
IN THE CASE OF KASTEN"S +INDEMNIFICATION OBLIGATIONS, KASTEN"S CUMULATIVE LIABILITY UNDER THIS AGREEMENT +SHALL BE LIMITED TO THE SUM OF THE LICENSE FEES PAID OR PAYABLE BY LICENSEE FOR +THE SOFTWARE, DOCUMENTATION OR SERVICES GIVING RISE TO THE CLAIM IN THE TWELVE +(12) MONTHS FOLLOWING THE EFFECTIVE DATE. + +11.3 THIRD PARTY SOFTWARE. NOTWITHSTANDING ANY LANGUAGE TO THE CONTRARY IN THIS +AGREEMENT, KASTEN SHALL NOT BE LIABLE FOR ANY DAMAGES REGARDING THE USE OR +OPERATION OF ANY THIRD-PARTY SOFTWARE FURNISHED UNDER THIS AGREEMENT. + +11.4 LIMITATION OF ACTIONS. IN NO EVENT MAY LICENSEE BRING ANY CAUSE OF ACTION +RELATED TO THIS AGREEMENT MORE THAN ONE (1) YEAR AFTER THE OCCURRENCE OF THE +EVENT GIVING RISE TO THE LIABILITY. + + +12. EXPORT +The Software, Documentation and related technical data may be subject +to U.S. export control laws, including without limitation the U.S. Export +Administration Act and its associated regulations, and may be subject to export +or import regulations in other countries. Licensee shall comply with all such +regulations and agrees to obtain all necessary licenses to export, re-export, or +import the Software, Documentation and related technical data. + + +13. GENERAL + +13.1 No Agency. Kasten and Licensee each acknowledge and agree that the +relationship established by this Agreement is that of independent contractors, +and nothing contained in this Agreement shall be construed to: (1) give either +party the power to direct or control the daytoday activities of the other; (2) +deem the parties to be acting as partners, joint venturers, coowners or +otherwise as participants in a joint undertaking; or (3) permit either party or +any of either party"s officers, directors, employees, agents or representatives +to create or assume any obligation on behalf of or for the account of the other +party for any purpose whatsoever. + +13.2 Compliance with Laws. 
Each party agrees to comply with all applicable +laws, regulations, and ordinances relating to their performance hereunder. +Without limiting the foregoing, Licensee warrants and covenants that it will +comply with all then current laws and regulations of the United States and other +jurisdictions relating or applicable to Licensee"s use of the Software and +Documentation including, without limitation, those concerning Intellectual +Property Rights, invasion of privacy, defamation, and the import and export of +Software and Documentation. + +13.3 Force Majeure. Except for the duty to pay money, neither party shall be +liable hereunder by reason of any failure or delay in the performance of its +obligations hereunder on account of strikes, riots, fires, flood, storm, +explosions, acts of God, war, governmental action, earthquakes, or any other +cause which is beyond the reasonable control of such party. + +13.4 Governing Law; Venue and Jurisdiction. This Agreement shall be interpreted +according to the laws of the State of California without regard to or +application of choiceoflaw rules or principles. The parties expressly agree +that the United Nations Convention on Contracts for the International Sale of +Goods and the Uniform Computer Information Transactions Act will not apply. Any +legal action or proceeding arising under this Agreement will be brought +exclusively in the federal or state courts located in Santa Clara County, +California and the parties hereby consent to the personal jurisdiction and venue +therein. + +13.5 Injunctive Relief. The parties agree that monetary damages would not be an +adequate remedy for the breach of certain provisions of this Agreement, +including, without limitation, all provisions concerning infringement, +confidentiality and nondisclosure, or limitation on permitted use of the +Software or Documentation. 
The parties further agree that, in the event of such +breach, injunctive relief would be necessary to prevent irreparable injury. +Accordingly, either party shall have the right to seek injunctive relief or +similar equitable remedies to enforce such party's rights under the pertinent +provisions of this Agreement, without limiting its right to pursue any other +legal remedies available to it. + +13.6 Entire Agreement and Waiver. This Agreement and any exhibits hereto shall +constitute the entire agreement and contains all terms and conditions between +Kasten and Licensee with respect to the subject matter hereof and all prior +agreements, representations, and statement with respect to such subject matter +are superseded hereby. This Agreement may be changed only by written agreement +signed by both Kasten and Licensee. No failure of either party to exercise or +enforce any of its rights under this Agreement shall act as a waiver of +subsequent breaches; and the waiver of any breach shall not act as a waiver of +subsequent breaches. + +13.7 Severability. In the event any provision of this Agreement is held by a +court or other tribunal of competent jurisdiction to be unenforceable, that +provision will be enforced to the maximum extent permissible under applicable +law and the other provisions of this Agreement will remain in full force and +effect. The parties further agree that in the event such provision is an +essential part of this Agreement, they will begin negotiations for a suitable +replacement provision. + +13.8 Counterparts. This Agreement may be executed in any number of +counterparts, each of which, when so executed and delivered (including by +facsimile), shall be deemed an original, and all of which shall constitute one +and the same agreement. + +13.9 Binding Effect. This Agreement shall be binding upon and shall inure to +the benefit of the respective parties hereto, their respective successors and +permitted assigns. + +13.10 Assignment. 
Neither party may, without the prior written consent of the +other party (which shall not be unreasonably withheld), assign this Agreement, +in whole or in part, either voluntarily or by operation of law, and any attempt +to do so shall be a material default of this Agreement and shall be void. +Notwithstanding the foregoing, Kasten may assign its rights and benefits and +delegate its duties and obligations under this Agreement without the consent of +Licensee in connection with a merger, reorganization or sale of all or +substantially all relevant assets of the assigning party; in each case provided +that such successor assumes the assigning party"s obligations under this +Agreement. + diff --git a/charts/k10/k10/4.5.1400/files/favicon.png b/charts/k10/k10/4.5.1400/files/favicon.png new file mode 100644 index 0000000000000000000000000000000000000000..fb617ce12c6949ed2dd1bec208c179644bcec0d4 GIT binary patch literal 1802 zcmY*adt8!d8-9^Q;U~4MX*yHXJYWfUAO(S@qG@7bGxIP_4N#l{pHRT^D`su#Je8*P z)jUsR^SG>~RHnI>c|c94qVrIzB^}VzOi6TLef53s^LwBBdhYvruIstqKb|*(x_>Vm zW(orU0PgQcKB$Qp?W+&b%!hTB(=-9ZJ-F8ksFRr~Gz%&{)SnR;2smi4KA;0K1i)H~ zW&mkSV8c2F09#E20B|YjW3^Q0LlsjB{)n~2`_H@#H6mfm;80#@AO(MvorH>^v192d zK@vwx00;uS1}4#YF$h6YB8!U`5Uti3cn#L3(N>6c3hyhTRcIg;;muB_Bd{n}6vm1K zLm&`@WEum1knH<@yJkhSis$h-cr=>N=cD*8D0Xrj+6jllp)t;AXJD;5qOb(C9W+Ak?F|q7pJffAA*673Y?wmX(E=lC6DR@JYxrwGA?Yh`eb0}ft~q6 zS-@*H1iGV44=Z|xFY0HMKAkSj8vYM_RRZ#kFlhRKW%GYVP4jgFbD><&8I zv^XWN3}J45EHCNcSEWmiz#JQ&b_X2DY7W>@rOIn$xI_AF1rucFE!8<*l-JhgP|zUu z+v2>;RYq7)Qz|9=2ic-oiBNT`YySLfGuu*-{BD{G zZ`#7`+d>_8F5q}{&U~MZ3}zw|pk|7l4gHa4YjiMY7q8V@-1>riyq$4w9JQ$$Ns6tq zxd-``i1XUF(Zhxs5>eTjYk5i4=>QP3)TGI-P};Tjlnsrg>Ob|`j%;5zFDWN%Z%y}> zY-2O{QzLFqHcH_=D4@)S!_(U(fBl~u1%3WwFcSoL%=~ti|K!Gg=!?<&BiS1cWo-Ue zp`<5vcckA#0?*<~V+R9zV$)}H3(GPURF7yqh^aJg<5$e=A_Gg=tKXdR^>+X-8i@05 z-W|YKG@-|>d@Rl0EXk!sWgcb=S=69-eXoeAWmDQo`1zqnC%X~N=Q=;%4Jswh+Z*A5 z_QLU|!F?nmyQ=b@PNZ>RYn00lH7DvV?U|jj4cUb+==2H!O$R1Yl8wWsOWOscs1$Bw 
zfc<=a`~uD1BPLFG({Y3AcZa2KGL47TpB~`fk?V@~uPwJt!HuqMf)rtIAKWE$l-m4I z-8}Vmw4&_^O#!OXE6P{?)(Ar|cJ$gmhpaFSa!;`8Pg<|Un<-(>JS+8#5^g`R6}5@= zzOv3hZ|;|;hDWxNbDT~90D zl9pZ@$4O0bmu6AVD3Q&bJ}tfmgsRiZViC;Sy?NhWUV}}+6_;d(vWq`)XZE-`>^laO zJaBz=d5^@Wk`L}45!R+%ABk{HD(>Tmhm)Y6qZt&F7+WRS-7>Rl^~aR?HcHXO0`D7- z|2?nnC98m0i_>!5t;H_a{jzpHtzcD`KaNE9%mjUU_f3WQ!tf#S@TpG-m{oq%DKX5c z`Ra+*XdEGM1-sL%7~UOCaiTAM7Ylh*66f)}w|=c*El=@R!izK=A4n%&O>e+nGDg0% tn7HN~o~pnSpxO)Prh(w&4OH~a8i>le=(TeuS4aD2@%PzJuJejc{Re;E|Hl9T literal 0 HcmV?d00001 diff --git a/charts/k10/k10/4.5.1400/files/kasten-logo.svg b/charts/k10/k10/4.5.1400/files/kasten-logo.svg new file mode 100644 index 000000000..0d0ef14ee --- /dev/null +++ b/charts/k10/k10/4.5.1400/files/kasten-logo.svg @@ -0,0 +1,24 @@ + + + + + + diff --git a/charts/k10/k10/4.5.1400/files/styles.css b/charts/k10/k10/4.5.1400/files/styles.css new file mode 100644 index 000000000..2d9205711 --- /dev/null +++ b/charts/k10/k10/4.5.1400/files/styles.css @@ -0,0 +1,113 @@ +.theme-body { + background-color: #efefef; + color: #333; + font-family: 'Source Sans Pro', Helvetica, sans-serif; +} + +.theme-navbar { + background-color: #fff; + box-shadow: 0 2px 2px rgba(0, 0, 0, 0.2); + color: #333; + font-size: 13px; + font-weight: 100; + height: 46px; + overflow: hidden; + padding: 0 10px; +} + +.theme-navbar__logo-wrap { + display: inline-block; + height: 100%; + overflow: hidden; + padding: 10px 15px; + width: 300px; +} + +.theme-navbar__logo { + height: 100%; + max-height: 25px; +} + +.theme-heading { + font-size: 20px; + font-weight: 500; + margin-bottom: 10px; + margin-top: 0; +} + +.theme-panel { + background-color: #fff; + box-shadow: 0 5px 15px rgba(0, 0, 0, 0.5); + padding: 30px; +} + +.theme-btn-provider { + background-color: #fff; + color: #333; + min-width: 250px; +} + +.theme-btn-provider:hover { + color: #999; +} + +.theme-btn--primary { + background-color: #333; + border: none; + color: #fff; + min-width: 200px; + padding: 6px 12px; +} + 
+.theme-btn--primary:hover { + background-color: #666; + color: #fff; +} + +.theme-btn--success { + background-color: #2FC98E; + color: #fff; + width: 250px; +} + +.theme-btn--success:hover { + background-color: #49E3A8; +} + +.theme-form-row { + display: block; + margin: 20px auto; +} + +.theme-form-input { + border-radius: 4px; + border: 1px solid #CCC; + box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); + color: #666; + display: block; + font-size: 14px; + height: 36px; + line-height: 1.42857143; + margin: auto; + padding: 6px 12px; + width: 250px; +} + +.theme-form-input:focus, +.theme-form-input:active { + border-color: #66AFE9; + outline: none; +} + +.theme-form-label { + font-size: 13px; + font-weight: 600; + margin: 4px auto; + position: relative; + text-align: left; + width: 250px; +} + +.theme-link-back { + margin-top: 4px; +} diff --git a/charts/k10/k10/4.5.1400/license b/charts/k10/k10/4.5.1400/license new file mode 100644 index 000000000..fb23dbb82 --- /dev/null +++ b/charts/k10/k10/4.5.1400/license @@ -0,0 +1 @@ +Y3VzdG9tZXJOYW1lOiBzdGFydGVyLWxpY2Vuc2UKZGF0ZUVuZDogJzIxMDAtMDEtMDFUMDA6MDA6MDAuMDAwWicKZGF0ZVN0YXJ0OiAnMjAyMC0wMS0wMVQwMDowMDowMC4wMDBaJwpmZWF0dXJlczogbnVsbAppZDogc3RhcnRlci00ZjE4NDJjMC0wNzQ1LTQxYTUtYWFhNy1hMDFkNzQ4YjFjMzAKcHJvZHVjdDogSzEwCnJlc3RyaWN0aW9uczoKICBub2RlczogJzEwJwpzZXJ2aWNlQWNjb3VudEtleTogbnVsbAp2ZXJzaW9uOiB2MS4wLjAKc2lnbmF0dXJlOiBqT1N5NDNQZG5ZMFVCZitValhOdU1oUEFSb1J2ZkpzWElQWnhBWFNCaGpKbUwxNlNodi8vVzgyV2NMeGZJM25NZTA0TThtRU03eThPcnArQks1ekxpeFd3clpncmZSbTBEaWlELyttRjR5U3l1Rko0QW1neHV6NDhQTmdnU1VyWUM3S1FVcFYxSEJZV1ZaNm9udEJDeE1rVWtkaDVqdzZJdWMzN3lDaktIYy92bWZaenBzTVhybmxUdGhha2RjVVk0azNyVHJDa3VDcnFUMkpjM1o1amFGalZSZW1Zd1NBVXpkRldNazdQdkp3eHVFdE5rNitPV0pCVERQbnNYdldKdjdNc3NneDBJTmdtNUlJWDRVeEVhQWI4QXpTNkMyQ21XQzlhWURFTDg1aEFpeWhONXUwU0tQczA3ZXB0R1VHYmc3cWtPUVN0d0NhcDFKUURvbDVDT0E9PQo= diff --git a/charts/k10/k10/4.5.1400/questions.yaml b/charts/k10/k10/4.5.1400/questions.yaml new file mode 100644 index 000000000..713fcb116 --- 
/dev/null +++ b/charts/k10/k10/4.5.1400/questions.yaml @@ -0,0 +1,295 @@ +questions: +# ======================== +# SECRETS And Configuration +# ======================== + +### AWS Configuration + +- variable: secrets.awsAccessKeyId + description: "AWS access key ID (required for AWS deployment)" + type: password + label: AWS Access Key ID + required: false + group: "AWS Configuration" + +- variable: secrets.awsSecretAccessKey + description: "AWS access key secret (required for AWS deployment)" + type: password + label: AWS Secret Access Key + required: false + group: "AWS Configuration" + +- variable: secrets.awsIamRole + description: "ARN of the AWS IAM role assumed by K10 to perform any AWS operation." + type: string + label: ARN of the AWS IAM role + required: false + group: "AWS Configuration" + +- variable: awsConfig.assumeRoleDuration + description: "Duration of a session token generated by AWS for an IAM role" + type: string + label: Role Duration + required: false + default: "" + group: "AWS Configuration" + +- variable: awsConfig.efsBackupVaultName + description: "Specifies the AWS EFS backup vault name" + type: string + label: EFS Backup Vault Name + required: false + default: "k10vault" + group: "AWS Configuration" + +### Google Cloud Configuration + +- variable: secrets.googleApiKey + description: "Required If cluster is deployed on Google Cloud" + type: multiline + label: Non-default base64 encoded GCP Service Account key file + required: false + group: "GoogleApi Configuration" + +### Azure Configuration + +- variable: secrets.azureTenantId + description: "Azure tenant ID (required for Azure deployment)" + type: string + label: Tenant ID + required: false + group: "Azure Configuration" + +- variable: secrets.azureClientId + description: "Azure Service App ID" + type: password + label: Service App ID + required: false + group: "Azure Configuration" + +- variable: secrets.azureClientSecret + description: "Azure Service App secret" + type: password + 
label: Service App secret + required: false + group: "Azure Configuration" + +- variable: secrets.azureResourceGroup + description: "Resource Group name that was created for the Kubernetes cluster" + type: string + label: Resource Group + required: false + group: "Azure Configuration" + +- variable: secrets.azureSubscriptionID + description: "Subscription ID in your Azure tenant" + type: string + label: Subscription ID + required: false + group: "Azure Configuration" + +- variable: secrets.azureResourceMgrEndpoint + description: "Resource management endpoint for the Azure Stack instance" + type: string + label: Resource management endpoint + required: false + group: "Azure Configuration" + +- variable: secrets.azureADEndpoint + description: "Azure Active Directory login endpoint" + type: string + label: Active Directory login endpoint + required: false + group: "Azure Configuration" + +- variable: secrets.azureADResourceID + description: "Azure Active Directory resource ID to obtain AD tokens" + type: string + label: Active Directory resource ID + required: false + group: "Azure Configuration" + +# ======================== +# Authentication +# ======================== + +- variable: auth.basicAuth.enabled + description: "Configures basic authentication for the K10 dashboard" + type: boolean + label: Enable Basic Authentication + required: false + group: "Authentication" + show_subquestion_if: true + subquestions: + - variable: auth.basicAuth.htpasswd + description: "A username and password pair separated by a colon character" + type: password + label: Authentication Details (htpasswd) + - variable: auth.basicAuth.secretName + description: "Name of an existing Secret that contains a file generated with htpasswd" + type: string + label: Secret Name + +- variable: auth.tokenAuth.enabled + description: "Configures token based authentication for the K10 dashboard" + type: boolean + label: Enable Token Based Authentication + required: false + group: "Authentication" + +- 
variable: auth.oidcAuth.enabled + description: "Configures Open ID Connect based authentication for the K10 dashboard" + type: boolean + label: Enable OpenID Connect Based Authentication + required: false + group: "Authentication" + show_subquestion_if: true + subquestions: + - variable: auth.oidcAuth.providerURL + description: "URL for the OIDC Provider" + type: string + label: OIDC Provider URL + - variable: auth.oidcAuth.redirectURL + description: "URL for the K10 gateway Provider" + type: string + label: OIDC Redirect URL + - variable: auth.oidcAuth.scopes + description: "Space separated OIDC scopes required for userinfo. Example: `profile email`" + type: string + label: OIDC scopes + - variable: auth.oidcAuth.prompt + description: "The type of prompt to be used during authentication (none, consent, login, or select_account)" + type: enum + options: + - none + - consent + - login + - select_account + default: none + label: The type of prompt to be used during authentication (none, consent, login, or select_account) + - variable: auth.oidcAuth.clientID + description: "Client ID given by the OIDC provider for K10" + type: password + label: OIDC Client ID + - variable: auth.oidcAuth.clientSecret + description: "Client secret given by the OIDC provider for K10" + type: password + label: OIDC Client Secret + - variable: auth.oidcAuth.usernameClaim + description: "The claim to be used as the username" + type: string + label: OIDC UserName Claim + - variable: auth.oidcAuth.usernamePrefix + description: "Prefix that has to be used with the username obtained from the username claim" + type: string + label: OIDC UserName Prefix + - variable: auth.oidcAuth.groupClaim + description: "Name of a custom OpenID Connect claim for specifying user groups" + type: string + label: OIDC group Claim + - variable: auth.oidcAuth.groupPrefix + description: "All groups will be prefixed with this value to prevent conflicts" + type: string + label: OIDC group Prefix + +# 
======================== +# External Gateway +# ======================== + +- variable: externalGateway.create + description: "Configures an external gateway for K10 API services" + type: boolean + label: Create External Gateway + required: false + group: "External Gateway" + show_subquestion_if: true + subquestions: + - variable: externalGateway.annotations + description: "Standard annotations for the services" + type: multiline + default: "" + label: Annotation + - variable: externalGateway.fqdn.name + description: "Domain name for the K10 API services" + type: string + label: Domain Name + - variable: externalGateway.fqdn.type + description: "Supported gateway type: `route53-mapper` or `external-dns`" + type: string + label: Gateway Type route53-mapper or external-dns + - variable: externalGateway.awsSSLCertARN + description: "ARN for the AWS ACM SSL certificate used in the K10 API server" + type: multiline + label: ARN for the AWS ACM SSL certificate + +# ======================== +# Storage Management +# ======================== + +- variable: global.persistence.storageClass + label: StorageClass Name + description: "Specifies StorageClass Name to be used for PVCs" + type: string + required: false + default: "" + group: "Storage Management" + +- variable: prometheus.server.persistentVolume.storageClass + type: string + label: StorageClass Name for Prometheus PVC + description: "StorageClassName used to create Prometheus PVC. 
Setting this option overwrites global StorageClass value" + default: "" + required: false + group: "Storage Management" + +- variable: prometheus.server.persistentVolume.enabled + type: boolean + label: Enable PVC for Prometheus server + description: "If true, K10 Prometheus server will create a Persistent Volume Claim" + default: true + required: false + group: "Storage Management" + +- variable: global.persistence.enabled + type: boolean + label: Storage Enabled + description: "If true, K10 will use Persistent Volume Claim" + default: true + required: false + group: "Storage Management" + +# ======================== +# Service Account +# ======================== + +- variable: serviceAccount.name + description: "Name of a service account in the target namespace that has cluster-admin permissions. This is needed for the K10 to be able to protect cluster resources." + type: string + label: Service Account Name + required: false + group: "Service Account" + +# ======================== +# License +# ======================== + +- variable: license + description: "License string obtained from Kasten" + type: multiline + label: License String + group: "License" +- variable: eula.accept + description: "Whether to enable accept EULA before installation" + type: boolean + label: Enable accept EULA before installation + group: "License" + show_subquestion_if: true + subquestions: + - variable: eula.company + description: "Company name. Required field if EULA is accepted" + type: string + label: Company Name + - variable: eula.email + description: "Contact email. Required field if EULA is accepted" + type: string + label: Contact Email diff --git a/charts/k10/k10/4.5.1400/templates/NOTES.txt b/charts/k10/k10/4.5.1400/templates/NOTES.txt new file mode 100644 index 000000000..240f3062d --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/NOTES.txt @@ -0,0 +1,47 @@ +Thank you for installing Kasten’s K10 Data Management Platform! 
+ +Documentation can be found at https://docs.kasten.io/. + +How to access the K10 Dashboard: + +{{ if .Values.ingress.create }} +You are using the system's default ingress controller. Please ask your +administrator for instructions on how to access the cluster. + +WebUI location: https://{{ default "Your ingress endpoint" .Values.ingress.host }}/{{ default .Release.Name .Values.ingress.urlPath }} +{{ end }} + +The K10 dashboard is not exposed externally. To establish a connection to it use the following `kubectl` command: + +`kubectl --namespace {{ .Release.Namespace }} port-forward service/gateway 8080:{{ .Values.service.externalPort }}` + +The Kasten dashboard will be available at: `http{{ if or (and .Values.secrets.apiTlsCrt .Values.secrets.apiTlsKey) .Values.externalGateway.awsSSLCertARN }}s{{ end }}://127.0.0.1:8080/{{ .Release.Name }}/#/` + +{{ if.Values.externalGateway.create }} +{{ if .Values.externalGateway.fqdn.name }} + +The K10 Dashboard is accessible via {{ if or (and .Values.secrets.apiTlsCrt .Values.secrets.apiTlsKey) .Values.externalGateway.awsSSLCertARN }}https{{ else }}http{{ end }}://{{ .Values.externalGateway.fqdn.name }}/{{ .Release.Name }}/#/ + +{{ else }} + +The K10 Dashboard is accessible via a LoadBalancer. Find the service's EXTERNAL IP using: + `kubectl get svc gateway-ext --namespace {{ .Release.Namespace }} -o wide` +And use it in following URL + `http://SERVICE_EXTERNAL_IP/{{ .Release.Name }}/#/` +{{ end }} +{{ end }} + +{{ if and ( .Values.metering.awsManagedLicense ) ( not .Values.metering.licenseConfigSecretName ) }} + +IAM Role created during installation need to have permissions that allow K10 to +perform operations on EBS and, if needed, EFS and S3. Please create a policy +with required permissions, and use the commands below to attach the policy to +the service account. 
+ +`ROLE_NAME=$(kubectl get serviceaccount {{ .Values.serviceAccount.name }} -n {{ .Release.Namespace }} -ojsonpath="{.metadata.annotations['eks\.amazonaws\.com/role-arn']}" | awk -F '/' '{ print $(NF) }')` +`aws iam attach-role-policy --role-name "${ROLE_NAME}" --policy-arn ` + +Refer to `https://docs.kasten.io/latest/install/aws-containers-anywhere/aws-containers-anywhere.html#attaching-permissions-for-eks-installations` +for more information. + +{{ end }} \ No newline at end of file diff --git a/charts/k10/k10/4.5.1400/templates/_definitions.tpl b/charts/k10/k10/4.5.1400/templates/_definitions.tpl new file mode 100644 index 000000000..6b15fd6bc --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/_definitions.tpl @@ -0,0 +1,184 @@ +{{/* Autogenerated, do NOT modify */}} +{{- define "k10.additionalServices" -}}frontend kanister {{- end -}} +{{- define "k10.restServices" -}}admin auth bloblifecyclemanager catalog crypto dashboardbff events executor jobs logging metering state vbrintegrationapi {{- end -}} +{{- define "k10.services" -}}aggregatedapis config {{- end -}} +{{- define "k10.exposedServices" -}}auth dashboardbff vbrintegrationapi {{- end -}} +{{- define "k10.statelessServices" -}}admin aggregatedapis auth bloblifecyclemanager crypto dashboardbff events executor state vbrintegrationapi {{- end -}} +{{- define "k10.colocatedServices" -}}admin: + isExposed: false + port: 8001 + primary: state +bloblifecyclemanager: + isExposed: true + port: 8001 + primary: crypto +events: + isExposed: true + port: 8002 + primary: crypto +vbrintegrationapi: + isExposed: true + port: 8001 + primary: dashboardbff +{{- end -}} +{{- define "k10.colocatedServiceLookup" -}}crypto: +- bloblifecyclemanager +- events +dashboardbff: +- vbrintegrationapi +state: +- admin +{{- end -}} +{{- define "k10.aggregatedAPIs" -}}actions apps vault {{- end -}} +{{- define "k10.configAPIs" -}}config{{- end -}} +{{- define "k10.profiles" -}}profiles{{- end -}} +{{- define "k10.policies" 
-}}policies{{- end -}} +{{- define "k10.reportingAPIs" -}}reporting{{- end -}} +{{- define "k10.distAPIs" -}}dist{{- end -}} +{{- define "k10.actionsAPIs" -}}actions{{- end -}} +{{- define "k10.backupActions" -}}backupactions{{- end -}} +{{- define "k10.backupActionsDetails" -}}backupactions/details{{- end -}} +{{- define "k10.reportActions" -}}reportactions{{- end -}} +{{- define "k10.reportActionsDetails" -}}reportactions/details{{- end -}} +{{- define "k10.restoreActions" -}}restoreactions{{- end -}} +{{- define "k10.restoreActionsDetails" -}}restoreactions/details{{- end -}} +{{- define "k10.importActions" -}}importactions{{- end -}} +{{- define "k10.exportActions" -}}exportactions{{- end -}} +{{- define "k10.exportActionsDetails" -}}exportactions/details{{- end -}} +{{- define "k10.retireActions" -}}retireactions{{- end -}} +{{- define "k10.runActions" -}}runactions{{- end -}} +{{- define "k10.backupClusterActions" -}}backupclusteractions{{- end -}} +{{- define "k10.backupClusterActionsDetails" -}}backupclusteractions/details{{- end -}} +{{- define "k10.restoreClusterActions" -}}restoreclusteractions{{- end -}} +{{- define "k10.restoreClusterActionsDetails" -}}restoreclusteractions/details{{- end -}} +{{- define "k10.cancelActions" -}}cancelactions{{- end -}} +{{- define "k10.appsAPIs" -}}apps{{- end -}} +{{- define "k10.restorePoints" -}}restorepoints{{- end -}} +{{- define "k10.restorePointsDetails" -}}restorepoints/details{{- end -}} +{{- define "k10.clusterRestorePoints" -}}clusterrestorepoints{{- end -}} +{{- define "k10.clusterRestorePointsDetails" -}}clusterrestorepoints/details{{- end -}} +{{- define "k10.applications" -}}applications{{- end -}} +{{- define "k10.applicationsDetails" -}}applications/details{{- end -}} +{{- define "k10.vaultAPIs" -}}vault{{- end -}} +{{- define "k10.passkey" -}}passkeys{{- end -}} +{{- define "k10.authAPIs" -}}auth{{- end -}} +{{- define "k10.defaultConcurrentSnapshotConversions" -}}3{{- end -}} +{{- define 
"k10.defaultConcurrentWorkloadSnapshots" -}}5{{- end -}} +{{- define "k10.defaultK10DataStoreParallelUpload" -}}8{{- end -}} +{{- define "k10.defaultK10DataStoreGeneralContentCacheSizeMB" -}}0{{- end -}} +{{- define "k10.defaultK10DataStoreGeneralMetadataCacheSizeMB" -}}500{{- end -}} +{{- define "k10.defaultK10DataStoreRestoreContentCacheSizeMB" -}}500{{- end -}} +{{- define "k10.defaultK10DataStoreRestoreMetadataCacheSizeMB" -}}500{{- end -}} +{{- define "k10.defaultK10BackupBufferFileHeadroomFactor" -}}1.1{{- end -}} +{{- define "k10.defaultK10LimiterGenericVolumeSnapshots" -}}10{{- end -}} +{{- define "k10.defaultK10LimiterGenericVolumeCopies" -}}10{{- end -}} +{{- define "k10.defaultK10LimiterGenericVolumeRestores" -}}10{{- end -}} +{{- define "k10.defaultK10LimiterCsiSnapshots" -}}10{{- end -}} +{{- define "k10.defaultK10LimiterProviderSnapshots" -}}10{{- end -}} +{{- define "k10.defaultAssumeRoleDuration" -}}60m{{- end -}} +{{- define "k10.defaultKanisterBackupTimeout" -}}45{{- end -}} +{{- define "k10.defaultKanisterRestoreTimeout" -}}600{{- end -}} +{{- define "k10.defaultKanisterDeleteTimeout" -}}45{{- end -}} +{{- define "k10.defaultKanisterHookTimeout" -}}20{{- end -}} +{{- define "k10.defaultKanisterCheckRepoTimeout" -}}20{{- end -}} +{{- define "k10.defaultKanisterStatsTimeout" -}}20{{- end -}} +{{- define "k10.defaultKanisterEFSPostRestoreTimeout" -}}45{{- end -}} +{{- define "k10.cloudProviders" -}} aws google azure {{- end -}} +{{- define "k10.serviceResources" -}} +admin-svc: + admin-svc: + requests: + cpu: 2m + memory: 160Mi +aggregatedapis-svc: + aggregatedapis-svc: + requests: + cpu: 90m + memory: 180Mi +auth-svc: + auth-svc: + requests: + cpu: 2m + memory: 30Mi +bloblifecyclemanager-svc: + bloblifecyclemanager-svc: + requests: + cpu: 10m + memory: 40Mi +catalog-svc: + catalog-svc: + requests: + cpu: 200m + memory: 780Mi + kanister-sidecar: + limits: + cpu: 1200m + memory: 800Mi + requests: + cpu: 100m + memory: 800Mi +config-svc: + config-svc: 
+ requests: + cpu: 5m + memory: 30Mi +crypto-svc: + crypto-svc: + requests: + cpu: 1m + memory: 30Mi +dashboardbff-svc: + dashboardbff-svc: + requests: + cpu: 8m + memory: 40Mi +events-svc: + events-svc: + requests: + cpu: 3m + memory: 500Mi +executor-svc: + executor-svc: + requests: + cpu: 3m + memory: 50Mi + tools: + requests: + cpu: 1m + memory: 2Mi +frontend-svc: + frontend-svc: + requests: + cpu: 1m + memory: 40Mi +jobs-svc: + jobs-svc: + requests: + cpu: 30m + memory: 380Mi +kanister-svc: + kanister-svc: + requests: + cpu: 1m + memory: 30Mi +logging-svc: + logging-svc: + requests: + cpu: 2m + memory: 40Mi +metering-svc: + metering-svc: + requests: + cpu: 2m + memory: 30Mi +state-svc: + state-svc: + requests: + cpu: 2m + memory: 30Mi +{{- end -}} +{{- define "k10.multiClusterVersion" -}}2{{- end -}} +{{- define "k10.ambassadorImageTag" -}}2.2.2{{- end -}} +{{- define "k10.kanisterToolsImageTag" -}}0.78.0{{- end -}} +{{- define "k10.dexImageTag" -}}v2.24.0{{- end -}} +{{- define "k10.rhAmbassadorImageTag" -}}2.1.2{{- end -}} diff --git a/charts/k10/k10/4.5.1400/templates/_helpers.tpl b/charts/k10/k10/4.5.1400/templates/_helpers.tpl new file mode 100644 index 000000000..ef792e849 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/_helpers.tpl @@ -0,0 +1,647 @@ +{{/* Check if basic auth is needed */}} +{{- define "basicauth.check" -}} + {{- if .Values.auth.basicAuth.enabled }} + {{- print true }} + {{- end -}} {{/* End of check for auth.basicAuth.enabled */}} +{{- end -}} + +{{/* +Check if trusted root CA certificate related configmap settings +have been configured +*/}} +{{- define "check.cacertconfigmap" -}} +{{- if .Values.cacertconfigmap.name -}} +{{- print true -}} +{{- else -}} +{{- print false -}} +{{- end -}} +{{- end -}} + +{{/* +Check if the auth options are implemented using Dex +*/}} +{{- define "check.dexAuth" -}} +{{- if or .Values.auth.openshift.enabled .Values.auth.ldap.enabled -}} +{{- print true -}} +{{- end -}} +{{- end -}} + +{{/* Check the 
only 1 auth is specified */}} +{{- define "singleAuth.check" -}} +{{- $count := dict "count" (int 0) -}} +{{- $authList := list .Values.auth.basicAuth.enabled .Values.auth.tokenAuth.enabled .Values.auth.oidcAuth.enabled .Values.auth.openshift.enabled .Values.auth.ldap.enabled -}} +{{- range $i, $val := $authList }} +{{ if $val }} +{{ $c := add1 $count.count | set $count "count" }} +{{ if gt $count.count 1 }} +{{- fail "Multiple auth types were selected. Only one type can be enabled." }} +{{ end }} +{{ end }} +{{- end }} +{{- end -}}{{/* Check the only 1 auth is specified */}} + +{{/* Check if Auth is enabled */}} +{{- define "authEnabled.check" -}} +{{- $count := dict "count" (int 0) -}} +{{- $authList := list .Values.auth.basicAuth.enabled .Values.auth.tokenAuth.enabled .Values.auth.oidcAuth.enabled .Values.auth.openshift.enabled .Values.auth.ldap.enabled -}} +{{- range $i, $val := $authList }} +{{ if $val }} +{{ $c := add1 $count.count | set $count "count" }} +{{ end }} +{{- end }} +{{- if eq $count.count 0}} + {{- fail "Auth is required to expose access to K10." }} +{{- end }} +{{- end -}}{{/*end of check */}} + +{{/* Return ingress class name annotation */}} +{{- define "ingressClassAnnotation" -}} +{{- if .Values.ingress.class -}} +kubernetes.io/ingress.class: {{ .Values.ingress.class | quote }} +{{- end -}} +{{- end -}} + +{{/* Helm required labels */}} +{{- define "helm.labels" -}} +heritage: {{ .Release.Service }} +helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }} +app.kubernetes.io/name: {{ .Chart.Name }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{ include "k10.common.matchLabels" . }} +{{- end -}} + +{{- define "k10.common.matchLabels" -}} +app: {{ .Chart.Name }} +release: {{ .Release.Name }} +{{- end -}} + +{{- define "k10.defaultRBACLabels" -}} +k10.kasten.io/default-rbac-object: "true" +{{- end -}} + +{{/* Expand the name of the chart. 
*/}} +{{- define "name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "serviceAccountName" -}} +{{- if and .Values.metering.awsMarketplace ( not .Values.serviceAccount.name ) -}} + {{ print "k10-metering" }} +{{- else if .Values.serviceAccount.create -}} + {{ default (include "fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the metering service account to use +*/}} +{{- define "meteringServiceAccountName" -}} +{{- if and .Values.metering.awsManagedLicense ( not .Values.serviceAccount.name ) ( not .Values.metering.serviceAccount.name ) ( not .Values.metering.licenseConfigSecretName ) -}} + {{ print "k10-metering" }} +{{- else -}} + {{ default (include "serviceAccountName" .) 
.Values.metering.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Prints annotations based on .Values.fqdn.type +*/}} +{{- define "dnsAnnotations" -}} +{{- if .Values.externalGateway.fqdn.name -}} +{{- if eq "route53-mapper" ( default "" .Values.externalGateway.fqdn.type) }} +domainName: {{ .Values.externalGateway.fqdn.name | quote }} +{{- end }} +{{- if eq "external-dns" (default "" .Values.externalGateway.fqdn.type) }} +external-dns.alpha.kubernetes.io/hostname: {{ .Values.externalGateway.fqdn.name | quote }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Prometheus scrape config template for k10 services +*/}} +{{- define "k10.prometheusScrape" -}} +{{- $admin_port := default 8877 .main.Values.service.gatewayAdminPort -}} +- job_name: {{ .k10service }} + metrics_path: /metrics + {{- if eq "aggregatedapis" .k10service }} + scheme: https + tls_config: + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- else }} + scheme: http + {{- end }} + static_configs: + - targets: + {{- if eq "gateway" .k10service }} + - {{ .k10service }}-admin.{{ .main.Release.Namespace }}.svc.{{ .main.Values.cluster.domainName }}:{{ $admin_port }} + {{- else if eq "aggregatedapis" .k10service }} + - {{ .k10service }}-svc.{{ .main.Release.Namespace }}.svc.{{ .main.Values.cluster.domainName }}:443 + {{- else }} + {{- $service := default .k10service (index (include "k10.colocatedServices" . | fromYaml) .k10service).primary }} + {{- $port := default .main.Values.service.externalPort (index (include "k10.colocatedServices" . | fromYaml) .k10service).port }} + - {{ $service }}-svc.{{ .main.Release.Namespace }}.svc.{{ .main.Values.cluster.domainName }}:{{ $port }} + {{- end }} + labels: + application: {{ .main.Release.Name }} + service: {{ .k10service }} +{{- end -}} + +{{/* +Expands the name of the Prometheus chart. It is equivalent to what the +"prometheus.name" template does. 
It is needed because the referenced values in a +template are relative to where/when the template is called from, and not where +the template is defined at. This means that the value of .Chart.Name and +.Values.nameOverride are different depending on whether the template is called +from within the Prometheus chart or the K10 chart. +*/}} +{{- define "k10.prometheus.name" -}} +{{- default "prometheus" .Values.prometheus.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expands the name of the Prometheus service created to expose the prometheus server. +*/}} +{{- define "k10.prometheus.service.name" -}} +{{- default (printf "%s-%s-%s" .Release.Name "prometheus" .Values.prometheus.server.name) .Values.prometheus.server.fullnameOverride }} +{{- end -}} + +{{/* +Checks if EULA is accepted via cmd +Enforces eula.company and eula.email as required fields +returns configMap fields +*/}} +{{- define "k10.eula.fields" -}} +{{- if .Values.eula.accept -}} +accepted: "true" +company: {{ required "eula.company is required field if eula is accepted" .Values.eula.company }} +email: {{ required "eula.email is required field if eula is accepted" .Values.eula.email }} +{{- else -}} +accepted: "" +company: "" +email: "" +{{- end }} +{{- end -}} + +{{/* +Helper to determine the API Domain +*/}} +{{- define "apiDomain" -}} +{{- if .Values.useNamespacedAPI -}} +kio.{{- replace "-" "." .Release.Namespace -}} +{{- else -}} +kio.kasten.io +{{- end -}} +{{- end -}} + +{{/* +Get dex image, if user wants to +install certified version of upstream +images or not +*/}} +{{- define "k10.dexImage" -}} +{{- if not .Values.rhMarketPlace }} +{{- printf "%s:%s" ( include "k10.dexImageRepo" . ) (include "k10.dexTag" .) 
}} +{{- else }} +{{- printf "%s" (get .Values.images "dex") }} +{{- end -}} +{{- end -}} + +{{/* +Get dex image repo based on conditions: +whether it's airgapped and whether Red Hat images are +required +*/}} +{{- define "k10.dexImageRepo" -}} +{{- if .Values.global.upstreamCertifiedImages }} +{{- if .Values.global.airgapped.repository }} +{{- printf "%s/dex" .Values.global.airgapped.repository }} +{{- else }} +{{- printf "%s/%s/dex" .Values.image.registry .Values.image.repository }} +{{- end}} +{{- else }} +{{- if .Values.global.airgapped.repository }} +{{- printf "%s/dex" .Values.global.airgapped.repository }} +{{- else }} +{{- printf "%s/%s/%s" .Values.dexImage.registry .Values.dexImage.repository .Values.dexImage.image }} +{{- end }} +{{- end }} +{{- end -}} + +{{/* +Get dex image tag based on conditions: +whether it's airgapped and whether Red Hat images are +required +*/}} +{{- define "k10.dexTag" -}} +{{- if .Values.global.upstreamCertifiedImages }} +{{- if .Values.global.airgapped.repository }} +{{- printf "k10-%s-rh-ubi" (include "k10.dexImageTag" .) }} +{{- else }} +{{- printf "%s-rh-ubi" (include "k10.dexImageTag" .) }} +{{- end}} +{{- else }} +{{- if .Values.global.airgapped.repository }} +{{- printf "k10-%s" (include "k10.dexImageTag" .) }} +{{- else }} +{{- printf "%s" (include "k10.dexImageTag" .) }} +{{- end }} +{{- end }} +{{- end -}} + +{{/* +Get ambassador image based on whether +or not we are installing k10 on openshift +*/}} +{{- define "k10.ambImage" -}} +{{- if not .Values.global.rhMarketPlace }} +{{- printf "%s:%s" ( include "k10.ambImageRepo" .) (include "k10.ambImageTag" .) 
}} +{{- else }} +{{- printf "%s" (get .Values.global.images "emissary") }} +{{- end -}} +{{- end -}} + +{{- define "k10.ambImageRepo" -}} +{{- if .Values.global.upstreamCertifiedImages }} +{{- if .Values.global.airgapped.repository }} +{{- printf "%s/emissary" .Values.global.airgapped.repository }} +{{- else }} +{{- printf "%s/%s/emissary" .Values.image.registry .Values.image.repository }} +{{- end }} +{{- else }} +{{- if .Values.global.airgapped.repository }} +{{- printf "%s/emissary" .Values.global.airgapped.repository }} +{{- else }} +{{- printf "%s/%s/%s" .Values.ambassadorImage.registry .Values.ambassadorImage.repository .Values.ambassadorImage.image }} +{{- end }} +{{- end }} +{{- end -}} + +{{- define "k10.ambImageTag" -}} +{{- if .Values.global.upstreamCertifiedImages }} +{{- if .Values.global.airgapped.repository }} +{{- printf "k10-%s-rh-ubi" (include "k10.rhAmbassadorImageTag" .) }} +{{- else }} +{{- printf "%s-rh-ubi" (include "k10.rhAmbassadorImageTag" .) }} +{{- end }} +{{- else }} +{{- if .Values.global.airgapped.repository }} +{{- printf "k10-%s" (include "k10.ambassadorImageTag" .) }} +{{- else }} +{{- printf "%s" (include "k10.ambassadorImageTag" .) 
}} +{{- end }} +{{- end }} +{{- end -}} + +{{/* +Check if AWS creds are specified +*/}} +{{- define "check.awscreds" -}} +{{- if or .Values.secrets.awsAccessKeyId .Values.secrets.awsSecretAccessKey -}} +{{- print true -}} +{{- end -}} +{{- end -}} + +{{/* +Check if the kanister-tools image has k10- in its name; +this means we need to overwrite the kanister image in the system +*/}} +{{- define "overwite.kanisterToolsImage" -}} +{{- if or .Values.global.airgapped.repository .Values.global.rhMarketPlace -}} +{{- print true -}} +{{- end -}} +{{- end -}} + +{{/* +Figure out the kanisterToolsImage.image based on +the value of airgapped.repository +The details on how these images are being generated +are in the issue below +https://kasten.atlassian.net/browse/K10-4036 +Using substr to remove repo from kanisterToolsImage +*/}} +{{- define "get.kanisterToolsImage" }} +{{- if not .Values.global.rhMarketPlace }} +{{- if .Values.global.airgapped.repository }} +{{- printf "%s/%s:k10-%s" (.Values.global.airgapped.repository) (.Values.kanisterToolsImage.image) (include "k10.kanisterToolsImageTag" .) -}} +{{- else }} +{{- printf "%s/%s/%s:%s" (.Values.kanisterToolsImage.registry) (.Values.kanisterToolsImage.repository) (.Values.kanisterToolsImage.image) (include "k10.kanisterToolsImageTag" .) 
-}} +{{- end }} +{{- else }} +{{- printf "%s" (get .Values.global.images "kanister-tools") -}} +{{- end }} +{{- end }} + +{{/* +Check if Google creds are specified +*/}} +{{- define "check.googlecreds" -}} +{{- if .Values.secrets.googleApiKey -}} +{{- print true -}} +{{- end -}} +{{- end -}} + +{{/* +Check if IBM SL api key is specified +*/}} +{{- define "check.ibmslcreds" -}} +{{- if or .Values.secrets.ibmSoftLayerApiKey .Values.secrets.ibmSoftLayerApiUsername -}} +{{- print true -}} +{{- end -}} +{{- end -}} + +{{/* +Check if Azure creds are specified +*/}} +{{- define "check.azurecreds" -}} +{{- if or (or .Values.secrets.azureTenantId .Values.secrets.azureClientId) .Values.secrets.azureClientSecret -}} +{{- print true -}} +{{- end -}} +{{- end -}} + +{{/* +Check if Vsphere creds are specified +*/}} +{{- define "check.vspherecreds" -}} +{{- if or (or .Values.secrets.vsphereEndpoint .Values.secrets.vsphereUsername) .Values.secrets.vspherePassword -}} +{{- print true -}} +{{- end -}} +{{- end -}} + +{{/* +Check if Vault creds are specified +*/}} +{{- define "check.vaultcreds" -}} +{{- if .Values.vault.secretName -}} +{{- print true -}} +{{- end -}} +{{- end -}} + +{{/* +Checks and enforces only 1 set of cloud creds is specified +*/}} +{{- define "enforce.singlecloudcreds" -}} +{{- $count := dict "count" (int 0) -}} +{{- $main := . -}} +{{- range $ind, $cloud_provider := include "k10.cloudProviders" . | splitList " " }} +{{ if eq (include (printf "check.%screds" $cloud_provider) $main) "true" }} +{{ $c := add1 $count.count | set $count "count" }} +{{ if gt $count.count 1 }} +{{- fail "Credentials for different cloud providers were provided but only one is allowed. Please verify your .secrets.* values." 
}} +{{ end }} +{{ end }} +{{- end }} +{{- end -}} + +{{/* +Converts .Values.features into k10-features: map[string]: "value" +*/}} +{{- define "k10.features" -}} +{{ range $n, $v := .Values.features }} +{{ $n }}: {{ $v | quote -}} +{{ end }} +{{- end -}} + +{{/* +Returns a license base64 either from file or from values +or prints it for awsmarketplace or awsManagedLicense +*/}} +{{- define "k10.getlicense" -}} +{{- if .Values.metering.awsMarketplace -}} +{{- print "Y3VzdG9tZXJOYW1lOiBhd3MtbWFya2V0cGxhY2UKZGF0ZUVuZDogJzIxMDAtMDEtMDFUMDA6MDA6MDAuMDAwWicKZGF0ZVN0YXJ0OiAnMjAxOC0wOC0wMVQwMDowMDowMC4wMDBaJwpmZWF0dXJlczoKICBjbG91ZE1ldGVyaW5nOiBhd3MKaWQ6IGF3cy1ta3QtNWMxMDlmZDUtYWI0Yy00YTE0LWJiY2QtNTg3MGU2Yzk0MzRiCnByb2R1Y3Q6IEsxMApyZXN0cmljdGlvbnM6IG51bGwKdmVyc2lvbjogdjEuMC4wCnNpZ25hdHVyZTogY3ZEdTNTWHljaTJoSmFpazR3THMwTk9mcTNFekYxQ1pqLzRJMUZVZlBXS0JETHpuZmh2eXFFOGUvMDZxNG9PNkRoVHFSQlY3VFNJMzVkQzJ4alllaGp3cWwxNHNKT3ZyVERKZXNFWVdyMVFxZGVGVjVDd21HczhHR0VzNGNTVk5JQXVseGNTUG9oZ2x2UlRJRm0wVWpUOEtKTzlSTHVyUGxyRjlGMnpnK0RvM2UyTmVnamZ6eTVuMUZtd24xWUNlbUd4anhFaks0djB3L2lqSGlwTGQzWVBVZUh5Vm9mZHRodGV0YmhSUGJBVnVTalkrQllnRklnSW9wUlhpYnpTaEMvbCs0eTFEYzcyTDZXNWM0eUxMWFB1SVFQU3FjUWRiYnlwQ1dYYjFOT3B3aWtKMkpsR0thMldScFE4ZUFJNU9WQktqZXpuZ3FPa0lRUC91RFBtSXFBPT0K" -}} +{{- else if or ( .Values.metering.awsManagedLicense ) ( .Values.metering.licenseConfigSecretName ) -}} +{{- print 
"Y3VzdG9tZXJOYW1lOiBhd3MtdG90ZW0KZGF0ZUVuZDogJzIxMDAtMDEtMDFUMDA6MDA6MDAuMDAwWicKZGF0ZVN0YXJ0OiAnMjAyMS0wOS0wMVQwMDowMDowMC4wMDBaJwpmZWF0dXJlczoKICBleHRlcm5hbExpY2Vuc2U6IGF3cwogIHByb2R1Y3RTS1U6IGI4YzgyMWQ5LWJmNDAtNDE4ZC1iYTBiLTgxMjBiZjc3ZThmOQogIGtleUZpbmdlcnByaW50OiBhd3M6Mjk0NDA2ODkxMzExOkFXUy9NYXJrZXRwbGFjZTppc3N1ZXItZmluZ2VycHJpbnQKaWQ6IGF3cy1leHQtMWUxMTVlZjMtM2YyMC00MTJlLTgzODItMmE1NWUxMTc1OTFlCnByb2R1Y3Q6IEsxMApyZXN0cmljdGlvbnM6CiAgbm9kZXM6ICczJwp2ZXJzaW9uOiB2MS4wLjAKc2lnbmF0dXJlOiBkeEtLN3pPUXdzZFBOY2I1NExzV2hvUXNWeWZSVDNHVHZ0VkRuR1Vvb2VxSGlwYStTY25HTjZSNmdmdmtWdTRQNHh4RmV1TFZQU3k2VnJYeExOTE1RZmh2NFpBSHVrYmFNd3E5UXhGNkpGSmVXbTdzQmdtTUVpWVJ2SnFZVFcyMlNoakZEU1RWejY5c2JBTXNFMUd0VTdXKytITGk0dnhybjVhYkd6RkRHZW5iRE5tcXJQT3dSa3JIdTlHTFQ1WmZTNDFUL0hBMjNZZnlsTU54MGFlK2t5TGZvZXNuK3FKQzdld2NPWjh4eE94bFRJR3RuWDZ4UU5DTk5iYjhSMm5XbmljNVd0OElEc2VDR3lLMEVVRW9YL09jNFhsWVVra3FGQ0xPdVhuWDMxeFZNZ1NFQnVEWExFd3Y3K2RlSmcvb0pMaW9EVHEvWUNuM0lnem9VR2NTMGc9PQo=" -}} +{{- else -}} +{{- print (default (.Files.Get "license") .Values.license) -}} +{{- end -}} +{{- end -}} + +{{/* +Returns resource usage given a pod name and container name +*/}} +{{- define "k10.resource.request" -}} +{{- $resourceDefaultList := (include "k10.serviceResources" .main | fromYaml) }} +{{- $podName := .k10_service_pod_name }} +{{- $containerName := .k10_service_container_name }} +{{- $resourceValue := "" }} +{{- if (hasKey $resourceDefaultList $podName) }} + {{- $resourceValue = index (index $resourceDefaultList $podName) $containerName }} +{{- end }} +{{- if (hasKey .main.Values.resources $podName) }} + {{- if (hasKey (index .main.Values.resources $podName) $containerName) }} + {{- $resourceValue = index (index .main.Values.resources $podName) $containerName }} + {{- end }} +{{- end }} +{{- /* If no resource usage value was provided, do not include the resources section */}} +{{- /* This allows users to set unlimited resources by providing a service key that is empty (e.g. 
`--set resources.=`) */}} +{{- if $resourceValue }} +resources: +{{- $resourceValue | toYaml | trim | nindent 2 }} +{{- else if eq .main.Release.Namespace "default" }} +resources: + requests: + cpu: "0.01" +{{- end }} +{{- end -}} + +{{- define "kanisterToolsResources" }} +{{- if .Values.genericVolumeSnapshot.resources.requests.memory }} +KanisterToolsMemoryRequests: {{ .Values.genericVolumeSnapshot.resources.requests.memory | quote }} +{{- end }} +{{- if .Values.genericVolumeSnapshot.resources.requests.cpu }} +KanisterToolsCPURequests: {{ .Values.genericVolumeSnapshot.resources.requests.cpu | quote }} +{{- end }} +{{- if .Values.genericVolumeSnapshot.resources.limits.memory }} +KanisterToolsMemoryLimits: {{ .Values.genericVolumeSnapshot.resources.limits.memory | quote }} +{{- end }} +{{- if .Values.genericVolumeSnapshot.resources.limits.cpu }} +KanisterToolsCPULimits: {{ .Values.genericVolumeSnapshot.resources.limits.cpu | quote }} +{{- end }} +{{- end }} + +{{- define "get.kanisterPodCustomLabels" -}} +{{- if .Values.kanisterPodCustomLabels }} +KanisterPodCustomLabels: {{ .Values.kanisterPodCustomLabels | quote }} +{{- end }} +{{- end }} + +{{- define "get.kanisterPodCustomAnnotations" -}} +{{- if .Values.kanisterPodCustomAnnotations }} +KanisterPodCustomAnnotations: {{ .Values.kanisterPodCustomAnnotations | quote }} +{{- end }} +{{- end }} + +{{/* +Lookup and return only enabled colocated services +*/}} +{{- define "get.enabledColocatedSvcList" -}} +{{- $enabledColocatedSvcList := dict }} +{{- $colocatedList := include "k10.colocatedServiceLookup" . 
| fromYaml }} +{{- range $primary, $secondaryList := $colocatedList }} + {{- $enabledSecondarySvcList := list }} + {{- range $skip, $secondary := $secondaryList }} + {{- if or (not (hasKey $.Values.optionalColocatedServices $secondary)) ((index $.Values.optionalColocatedServices $secondary).enabled) }} + {{- $enabledSecondarySvcList = append $enabledSecondarySvcList $secondary }} + {{- end }} + {{- end }} + {{- if gt (len $enabledSecondarySvcList) 0 }} + {{- $enabledColocatedSvcList = set $enabledColocatedSvcList $primary $enabledSecondarySvcList }} + {{- end }} +{{- end }} +{{- $enabledColocatedSvcList | toYaml | trim | nindent 0}} +{{- end -}} + +{{- define "get.serviceContainersInPod" -}} +{{- $podService := .k10_service_pod }} +{{- $colocatedList := include "k10.colocatedServices" . | fromYaml }} +{{- $colocatedLookupByPod := include "get.enabledColocatedSvcList" .main | fromYaml }} +{{- $containerList := list $podService }} +{{- if hasKey $colocatedLookupByPod $podService }} + {{- $containerList = concat $containerList (index $colocatedLookupByPod $podService)}} +{{- end }} +{{- $containerList | join " " }} +{{- end -}} + +{{- define "get.statefulRestServicesInPod" -}} +{{- $statefulRestSvcsInPod := list }} +{{- $podService := .k10_service_pod }} +{{- $containerList := (dict "main" .main "k10_service_pod" $podService | include "get.serviceContainersInPod" | splitList " ") }} +{{- if .main.Values.global.persistence.enabled }} + {{- range $skip, $containerInPod := $containerList }} + {{- $isRestService := has $containerInPod (include "k10.restServices" . | splitList " ") }} + {{- $isStatelessService := has $containerInPod (include "k10.statelessServices" . 
| splitList " ") }} + {{- if and $isRestService (not $isStatelessService) }} + {{- $statefulRestSvcsInPod = append $statefulRestSvcsInPod $containerInPod }} + {{- end }} + {{- end }} +{{- end }} +{{- $statefulRestSvcsInPod | join " " }} +{{- end -}} + +{{- define "k10.ingressPath" -}} + {{- if and .Values.global.ingress.create .Values.global.route.enabled -}} + {{ fail "Either enable ingress or route"}} + {{- end -}} + {{- if .Values.global.ingress.create -}} + {{ if .Values.global.ingress.urlPath }} + {{- print .Values.global.ingress.urlPath -}} + {{ else }} + {{- print .Release.Name -}} + {{- end -}} + {{- else if .Values.global.route.enabled -}} + {{ if .Values.global.route.path }} + {{- print .Values.global.route.path -}} + {{ else }} + {{- print .Release.Name -}} + {{- end -}} + {{ else }} + {{- print .Release.Name -}} + {{- end -}} +{{- end -}} + + +{{/* +Check if encryption keys are specified +*/}} +{{- define "check.primaryKey" -}} +{{- if (or .Values.encryption.primaryKey.awsCmkKeyId .Values.encryption.primaryKey.vaultTransitKeyName) -}} +{{- print true -}} +{{- end -}} +{{- end -}} + +{{- define "check.validateMonitoringProperties" -}} +{{- include "check.monitoringPrefix" . -}} +{{- include "check.monitoringFullNameOverride" . -}} +{{- end -}} + +{{- define "check.monitoringPrefix" -}} +{{- if eq .Values.prometheus.server.enabled .Values.grafana.enabled -}} +{{- if not (eq .Values.prometheus.server.prefixURL .Values.grafana.prometheusPrefixURL) -}} +{{ fail "Prometheus and Grafana prefixURL should match. Please check values of prometheus.server.prefixURL and grafana.prometheusPrefixURL" }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "check.monitoringFullNameOverride" -}} +{{- if eq .Values.prometheus.server.enabled .Values.grafana.enabled -}} +{{- if not (eq .Values.prometheus.server.fullnameOverride .Values.grafana.prometheusName) -}} +{{ fail "The Prometheus name overrides must match. 
Please check values of prometheus.server.fullnameOverride and grafana.prometheusName" }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{- define "check.validateImagePullSecrets" -}} + {{/* Validate image pull secrets if a custom Docker config is provided */}} + {{- if (or .Values.secrets.dockerConfig .Values.secrets.dockerConfigPath ) -}} + {{- if (and .Values.grafana.enabled (not .Values.global.imagePullSecret) (not .Values.grafana.image.pullSecrets)) -}} + {{ fail "A custom Docker config was provided, but Grafana is not configured to use it. Please check that global.imagePullSecret is set correctly." }} + {{- end -}} + {{- if (and .Values.prometheus.server.enabled (not .Values.global.imagePullSecret) (not .Values.prometheus.imagePullSecrets)) -}} + {{ fail "A custom Docker config was provided, but Prometheus is not configured to use it. Please check that global.imagePullSecret is set correctly." }} + {{- end -}} + {{- end -}} +{{- end -}} + +{{- define "k10.imagePullSecrets" }} +{{- $imagePullSecrets := list .Values.global.imagePullSecret }}{{/* May be empty, but the compact below will handle that */}} +{{- if (or .Values.secrets.dockerConfig .Values.secrets.dockerConfigPath) }} + {{- $imagePullSecrets = concat $imagePullSecrets (list "k10-ecr") }} +{{- end }} +{{- $imagePullSecrets = $imagePullSecrets | compact | uniq }} + +{{- if $imagePullSecrets }} +imagePullSecrets: + {{- range $imagePullSecrets }} + {{/* Check if the name is not empty string */}} + - name: {{ . }} + {{- end }} +{{- end }} +{{- end }} + +{{/* +Below helper template functions are referred from chart +https://github.com/prometheus-community/helm-charts/blob/main/charts/prometheus/templates/_helpers.tpl +*/}} + +{{/* +Return kubernetes version +*/}} +{{- define "k10.kubeVersion" -}} + {{- default .Capabilities.KubeVersion.Version (regexFind "v[0-9]+\\.[0-9]+\\.[0-9]+" .Capabilities.KubeVersion.Version) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "ingress.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19.x" (include "k10.kubeVersion" .)) -}} + {{- print "networking.k8s.io/v1" -}} + {{- else if .Capabilities.APIVersions.Has "extensions/v1beta1" -}} + {{- print "extensions/v1beta1" -}} + {{- else -}} + {{- print "networking.k8s.io/v1beta1" -}} + {{- end -}} +{{- end -}} + +{{/* +Is ingress part of stable APIVersion. +*/}} +{{- define "ingress.isStable" -}} + {{- eq (include "ingress.apiVersion" .) "networking.k8s.io/v1" -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/templates/_k10_container.tpl b/charts/k10/k10/4.5.1400/templates/_k10_container.tpl new file mode 100644 index 000000000..6d23797e3 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/_k10_container.tpl @@ -0,0 +1,659 @@ +{{- define "k10-containers" }} +{{- $pod := .k10_pod }} +{{- with .main }} +{{- $main_context := . }} +{{- $colocatedList := include "k10.colocatedServices" . | fromYaml }} +{{- $containerList := (dict "main" $main_context "k10_service_pod" $pod | include "get.serviceContainersInPod" | splitList " ") }} + containers: +{{- range $skip, $container := $containerList }} + {{- $port := default $main_context.Values.service.externalPort (index $colocatedList $container).port }} + {{- $serviceStateful := has $container (dict "main" $main_context "k10_service_pod" $pod | include "get.statefulRestServicesInPod" | splitList " ") }} + {{- dict "main" $main_context "k10_pod" $pod "k10_container" $container "externalPort" $port "stateful" $serviceStateful | include "k10-container" }} +{{- end }} +{{- end }}{{/* with .main */}} +{{- end }}{{/* define "k10-containers" */}} + +{{- define "k10-container" }} +{{- $pod := .k10_pod }} +{{- $service := .k10_container }} +{{- $externalPort := .externalPort }} +{{- with .main }} + - name: {{ $service }}-svc + {{- dict "main" . 
"k10_service" $service | include "serviceImage" | indent 8 }} + imagePullPolicy: {{ .Values.image.pullPolicy }} +{{- if eq $service "aggregatedapis" }} + args: + - "--secure-port={{ .Values.service.aggregatedApiPort }}" + - "--cert-dir=/tmp/apiserver.local.config/certificates/" +{{- if .Values.useNamespacedAPI }} + - "--k10-api-domain={{ template "apiDomain" . }}" +{{- end }}{{/* .Values.useNamespacedAPI */}} +{{/* +We need this explicit conversion because installation using operator hub was failing +stating that types are not same for the equality check +*/}} +{{- else if not (eq (int .Values.service.externalPort) (int $externalPort) ) }} + args: + - "--port={{ $externalPort }}" + - "--host=0.0.0.0" +{{- end }}{{/* eq $service "aggregatedapis" */}} +{{- $podName := (printf "%s-svc" $service) }} +{{- $containerName := (printf "%s-svc" $service) }} +{{- dict "main" . "k10_service_pod_name" $podName "k10_service_container_name" $containerName | include "k10.resource.request" | indent 8}} + ports: +{{- if eq $service "aggregatedapis" }} + - containerPort: {{ .Values.service.aggregatedApiPort }} +{{- else }} + - containerPort: {{ $externalPort }} +{{- end }} +{{- if eq $service "logging" }} + - containerPort: 24224 + protocol: TCP + - containerPort: 24225 + protocol: TCP +{{- end }} + livenessProbe: +{{- if eq $service "aggregatedapis" }} + tcpSocket: + port: {{ .Values.service.aggregatedApiPort }} + timeoutSeconds: 5 +{{- else }} + httpGet: + path: /v0/healthz + port: {{ $externalPort }} + timeoutSeconds: 1 +{{- end }} + initialDelaySeconds: 300 +{{- if ne $service "aggregatedapis" }} + readinessProbe: + httpGet: + path: /v0/healthz + port: {{ $externalPort }} + initialDelaySeconds: 3 +{{- end }} + env: +{{- if eq (include "check.googlecreds" .) "true" }} + - name: GOOGLE_APPLICATION_CREDENTIALS + value: "/var/run/secrets/kasten.io/kasten-gke-sa.json" +{{- end }} +{{- if eq (include "check.ibmslcreds" .) 
"true" }} + - name: IBM_SL_API_KEY + valueFrom: + secretKeyRef: + name: ibmsl-secret + key: ibm_sl_key + - name: IBM_SL_API_USERNAME + valueFrom: + secretKeyRef: + name: ibmsl-secret + key: ibm_sl_username +{{- end }} +{{- if eq (include "check.azurecreds" .) "true" }} + - name: AZURE_TENANT_ID + valueFrom: + secretKeyRef: + name: azure-creds + key: azure_tenant_id + - name: AZURE_CLIENT_ID + valueFrom: + secretKeyRef: + name: azure-creds + key: azure_client_id + - name: AZURE_CLIENT_SECRET + valueFrom: + secretKeyRef: + name: azure-creds + key: azure_client_secret +{{- if .Values.secrets.azureResourceGroup }} + - name: AZURE_RESOURCE_GROUP + valueFrom: + secretKeyRef: + name: azure-creds + key: azure_resource_group +{{- end }} +{{- if .Values.secrets.azureSubscriptionID }} + - name: AZURE_SUBSCRIPTION_ID + valueFrom: + secretKeyRef: + name: azure-creds + key: azure_subscription_id +{{- end }} +{{- if .Values.secrets.azureResourceMgrEndpoint }} + - name: AZURE_RESOURCE_MANAGER_ENDPOINT + valueFrom: + secretKeyRef: + name: azure-creds + key: azure_resource_manager_endpoint +{{- end }} +{{- if .Values.secrets.azureADEndpoint }} + - name: AZURE_AD_ENDPOINT + valueFrom: + secretKeyRef: + name: azure-creds + key: azure_ad_endpoint +{{- end }} +{{- if .Values.secrets.azureADResourceID }} + - name: AZURE_AD_RESOURCE + valueFrom: + secretKeyRef: + name: azure-creds + key: azure_ad_resource_id +{{- end }} +{{- if .Values.secrets.azureCloudEnvID }} + - name: AZURE_CLOUD_ENV_ID + valueFrom: + secretKeyRef: + name: azure-creds + key: azure_cloud_env_id +{{- end }} +{{- end }} +{{- if eq (include "check.awscreds" .) 
"true" }} + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: aws-creds + key: aws_access_key_id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: aws-creds + key: aws_secret_access_key +{{- if .Values.secrets.awsIamRole }} + - name: K10_AWS_IAM_ROLE + valueFrom: + secretKeyRef: + name: aws-creds + key: role +{{- end }} +{{- end }} +{{- if eq (include "check.vaultcreds" .) "true" }} + - name: VAULT_ADDR + value: {{ .Values.vault.address }} + - name: VAULT_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Values.vault.secretName }} + key: vault_token +{{- end }} +{{- if eq (include "check.vspherecreds" .) "true" }} + - name: VSPHERE_ENDPOINT + valueFrom: + secretKeyRef: + name: vsphere-creds + key: vsphere_endpoint + - name: VSPHERE_USERNAME + valueFrom: + secretKeyRef: + name: vsphere-creds + key: vsphere_username + - name: VSPHERE_PASSWORD + valueFrom: + secretKeyRef: + name: vsphere-creds + key: vsphere_password +{{- end }} + - name: VERSION + valueFrom: + configMapKeyRef: + name: k10-config + key: version +{{- if .Values.clusterName }} + - name: CLUSTER_NAME + valueFrom: + configMapKeyRef: + name: k10-config + key: clustername +{{- end }} +{{- if eq $service "config" }} + - name: K10_STATEFUL + value: "{{ .Values.global.persistence.enabled }}" +{{- end }} + - name: MODEL_STORE_DIR +{{- if or (eq $service "state") (not .Values.global.persistence.enabled) }} + value: "/tmp/k10store" +{{- else }} + valueFrom: + configMapKeyRef: + name: k10-config + key: modelstoredirname +{{- end }} +{{- if or (eq $service "kanister") (eq $service "executor")}} + - name: DATA_MOVER_IMAGE + value: {{ default .Chart.AppVersion .Values.image.tag | print .Values.image.registry "/" .Values.image.repository "/datamover:" }} + - name: KANISTER_POD_READY_WAIT_TIMEOUT + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterPodReadyWaitTimeout +{{- end }} + - name: LOG_LEVEL + valueFrom: + configMapKeyRef: + name: k10-config + key: loglevel +{{- if 
.Values.kanisterPodCustomLabels }} + - name: KANISTER_POD_CUSTOM_LABELS + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterPodCustomLabels +{{- end }} +{{- if .Values.kanisterPodCustomAnnotations }} + - name: KANISTER_POD_CUSTOM_ANNOTATIONS + valueFrom: + configMapKeyRef: + name: k10-config + key: kanisterPodCustomAnnotations +{{- end }} + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONCURRENT_SNAP_CONVERSIONS + valueFrom: + configMapKeyRef: + name: k10-config + key: concurrentSnapConversions + - name: CONCURRENT_WORKLOAD_SNAPSHOTS + valueFrom: + configMapKeyRef: + name: k10-config + key: concurrentWorkloadSnapshots + - name: K10_DATA_STORE_PARALLEL_UPLOAD + valueFrom: + configMapKeyRef: + name: k10-config + key: k10DataStoreParallelUpload + - name: K10_DATA_STORE_GENERAL_CONTENT_CACHE_SIZE_MB + valueFrom: + configMapKeyRef: + name: k10-config + key: k10DataStoreGeneralContentCacheSizeMB + - name: K10_DATA_STORE_GENERAL_METADATA_CACHE_SIZE_MB + valueFrom: + configMapKeyRef: + name: k10-config + key: k10DataStoreGeneralMetadataCacheSizeMB + - name: K10_DATA_STORE_RESTORE_CONTENT_CACHE_SIZE_MB + valueFrom: + configMapKeyRef: + name: k10-config + key: k10DataStoreRestoreContentCacheSizeMB + - name: K10_DATA_STORE_RESTORE_METADATA_CACHE_SIZE_MB + valueFrom: + configMapKeyRef: + name: k10-config + key: k10DataStoreRestoreMetadataCacheSizeMB + - name: K10_LIMITER_GENERIC_VOLUME_SNAPSHOTS + valueFrom: + configMapKeyRef: + name: k10-config + key: K10LimiterGenericVolumeSnapshots + - name: K10_LIMITER_GENERIC_VOLUME_COPIES + valueFrom: + configMapKeyRef: + name: k10-config + key: K10LimiterGenericVolumeCopies + - name: K10_LIMITER_GENERIC_VOLUME_RESTORES + valueFrom: + configMapKeyRef: + name: k10-config + key: K10LimiterGenericVolumeRestores + - name: K10_LIMITER_CSI_SNAPSHOTS + valueFrom: + configMapKeyRef: + name: k10-config + key: K10LimiterCsiSnapshots + - name: K10_LIMITER_PROVIDER_SNAPSHOTS + valueFrom: + 
configMapKeyRef: + name: k10-config + key: K10LimiterProviderSnapshots + - name: AWS_ASSUME_ROLE_DURATION + valueFrom: + configMapKeyRef: + name: k10-config + key: AWSAssumeRoleDuration +{{- if (eq $service "executor") }} + - name: KANISTER_BACKUP_TIMEOUT + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterBackupTimeout + - name: KANISTER_RESTORE_TIMEOUT + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterRestoreTimeout + - name: KANISTER_DELETE_TIMEOUT + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterDeleteTimeout + - name: KANISTER_HOOK_TIMEOUT + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterHookTimeout + - name: KANISTER_CHECKREPO_TIMEOUT + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterCheckRepoTimeout + - name: KANISTER_STATS_TIMEOUT + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterStatsTimeout + - name: KANISTER_EFSPOSTRESTORE_TIMEOUT + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterEFSPostRestoreTimeout +{{- end }} +{{- if and (eq $service "executor") (.Values.awsConfig.efsBackupVaultName) }} + - name: EFS_BACKUP_VAULT_NAME + valueFrom: + configMapKeyRef: + name: k10-config + key: efsBackupVaultName +{{- end }} +{{- if and (eq $service "executor") (.Values.vmWare.taskTimeoutMin) }} + - name: VMWARE_GOM_TIMEOUT_MIN + valueFrom: + configMapKeyRef: + name: k10-config + key: vmWareTaskTimeoutMin +{{- end }} +{{- if .Values.useNamespacedAPI }} + - name: K10_API_DOMAIN + valueFrom: + configMapKeyRef: + name: k10-config + key: apiDomain +{{- end }} +{{- if .Values.jaeger.enabled }} + - name: JAEGER_AGENT_HOST + value: {{ .Values.jaeger.agentDNS }} +{{- end }} +{{- if .Values.auth.tokenAuth.enabled }} + - name: TOKEN_AUTH + valueFrom: + secretKeyRef: + name: k10-token-auth + key: auth +{{- end }} +{{- if eq "true" (include "overwite.kanisterToolsImage" .) 
}} + - name: KANISTER_TOOLS + valueFrom: + configMapKeyRef: + name: k10-config + key: overwriteKanisterTools +{{- end }} +{{- if eq (include "check.cacertconfigmap" .) "true" }} + - name: CACERT_CONFIGMAP_NAME + value: {{ .Values.cacertconfigmap.name }} +{{- end }} + - name: K10_RELEASE_NAME + value: {{ .Release.Name }} + - name: KANISTER_FUNCTION_VERSION + valueFrom: + configMapKeyRef: + name: k10-config + key: kanisterFunctionVersion +{{- if and (eq $service "config") (.Values.injectKanisterSidecar.enabled) }} + - name: K10_MUTATING_WEBHOOK_ENABLED + value: "true" + - name: K10_MUTATING_WEBHOOK_TLS_CERT_DIR + valueFrom: + configMapKeyRef: + name: k10-config + key: K10MutatingWebhookTLSCertDir + - name: K10_MUTATING_WEBHOOK_PORT + value: {{ .Values.injectKanisterSidecar.webhookServer.port | quote }} +{{- end }} +{{- if or (eq $service "config") (eq $service "kanister") }} +{{- if .Values.genericVolumeSnapshot.resources.requests.memory }} + - name: KANISTER_TOOLS_MEMORY_REQUESTS + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterToolsMemoryRequests +{{- end }} +{{- if .Values.genericVolumeSnapshot.resources.requests.cpu }} + - name: KANISTER_TOOLS_CPU_REQUESTS + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterToolsCPURequests +{{- end }} +{{- if .Values.genericVolumeSnapshot.resources.limits.memory }} + - name: KANISTER_TOOLS_MEMORY_LIMITS + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterToolsMemoryLimits +{{- end }} +{{- if .Values.genericVolumeSnapshot.resources.limits.cpu }} + - name: KANISTER_TOOLS_CPU_LIMITS + valueFrom: + configMapKeyRef: + name: k10-config + key: KanisterToolsCPULimits +{{- end }} +{{- end }} +{{- if (list "dashboardbff" "config" "executor" | has $service) }} + {{- if .Values.prometheus.server.enabled }} + - name: K10_PROMETHEUS_HOST + value: {{ include "k10.prometheus.service.name" . 
}}-exp + - name: K10_PROMETHEUS_PORT + value: {{ .Values.prometheus.server.service.servicePort | quote }} + - name: K10_PROMETHEUS_BASE_URL + value: {{ .Values.prometheus.server.baseURL }} + {{- end }} + - name: K10_GRAFANA_ENABLED + value: {{ .Values.grafana.enabled | quote }} +{{- end }} +{{- if or $.stateful (or (eq (include "check.googlecreds" .) "true") (eq $service "auth" "logging")) }} + volumeMounts: +{{- else if or (or (eq (include "basicauth.check" .) "true") (or .Values.auth.oidcAuth.enabled (eq (include "check.dexAuth" .) "true"))) .Values.features }} + volumeMounts: +{{- else if and (eq $service "config") (.Values.injectKanisterSidecar.enabled) }} + volumeMounts: +{{- else if eq (include "check.cacertconfigmap" .) "true" }} + volumeMounts: +{{- end }} +{{- if $.stateful }} + - name: {{ $service }}-persistent-storage + mountPath: {{ .Values.global.persistence.mountPath | quote }} +{{- end }} +{{- if .Values.features }} + - name: k10-features + mountPath: "/mnt/k10-features" +{{- end }} +{{- if eq $service "logging" }} + - name: logging-configmap-storage + mountPath: "/mnt/conf" +{{- end }} +{{- if and (eq $service "config") (.Values.injectKanisterSidecar.enabled) }} + - name: mutating-webhook-certs + mountPath: /etc/ssl/certs/webhook + readOnly: true +{{- end }} +{{- if eq (include "basicauth.check" .) "true" }} + - name: k10-basic-auth + mountPath: "/var/run/secrets/kasten.io/k10-basic-auth" + readOnly: true +{{- end }} +{{- if (or .Values.auth.oidcAuth.enabled (eq (include "check.dexAuth" .) "true")) }} + - name: k10-oidc-auth + mountPath: "/var/run/secrets/kasten.io/k10-oidc-auth" + readOnly: true +{{- end }} +{{- if eq (include "check.googlecreds" .) "true" }} + - name: service-account + mountPath: "/var/run/secrets/kasten.io" +{{- end }} +{{- if eq (include "check.cacertconfigmap" .) 
"true" }} + - name: {{ .Values.cacertconfigmap.name }} + mountPath: "/etc/ssl/certs/custom-ca-bundle.pem" + subPath: custom-ca-bundle.pem +{{- end }} +{{- if .Values.toolsImage.enabled }} +{{- if eq $service "executor" }} + - name: tools + {{- dict "main" . "k10_service" "cephtool" | include "serviceImage" | indent 8 }} + imagePullPolicy: {{ .Values.toolsImage.pullPolicy }} +{{- $podName := (printf "%s-svc" $service) }} +{{- dict "main" . "k10_service_pod_name" $podName "k10_service_container_name" "tools" | include "k10.resource.request" | indent 8}} +{{- end }} +{{- end }} {{/* .Values.toolsImage.enabled */}} +{{- if and (eq $service "catalog") $.stateful }} + - name: kanister-sidecar + image: {{ include "get.kanisterToolsImage" .}} + imagePullPolicy: {{ .Values.kanisterToolsImage.pullPolicy }} +{{- $podName := (printf "%s-svc" $service) }} +{{- dict "main" . "k10_service_pod_name" $podName "k10_service_container_name" "kanister-sidecar" | include "k10.resource.request" | indent 8}} + volumeMounts: + - name: {{ $service }}-persistent-storage + mountPath: {{ .Values.global.persistence.mountPath | quote }} +{{- if eq (include "check.cacertconfigmap" .) "true" }} + - name: {{ .Values.cacertconfigmap.name }} + mountPath: "/etc/ssl/certs/custom-ca-bundle.pem" + subPath: custom-ca-bundle.pem +{{- end }} +{{- end }} {{/* and (eq $service "catalog") $.stateful */}} +{{- if and ( eq $service "auth" ) ( or .Values.auth.dex.enabled (eq (include "check.dexAuth" .) "true")) }} + - name: dex + image: {{ include "k10.dexImage" . 
}} +{{- if .Values.auth.ldap.enabled }} + command: ["/usr/local/bin/dex", "serve", "/dex-config/config.yaml"] +{{- else }} + command: ["/usr/local/bin/dex", "serve", "/etc/dex/cfg/config.yaml"] +{{- end }} + ports: + - name: http + containerPort: 8080 + volumeMounts: +{{- if .Values.auth.ldap.enabled }} + - name: dex-config + mountPath: /dex-config + - name: k10-logos-dex + mountPath: /web/themes/custom/ +{{- else }} + - name: config + mountPath: /etc/dex/cfg +{{- end }} +{{- if eq (include "check.cacertconfigmap" .) "true" }} + - name: {{ .Values.cacertconfigmap.name }} + mountPath: "/etc/ssl/certs/custom-ca-bundle.pem" + subPath: custom-ca-bundle.pem +{{- end }} +{{- end }} {{/* end of dex check */}} +{{- end }}{{/* with .main */}} +{{- end }}{{/* define "k10-container" */}} + +{{- define "k10-init-container-header" }} +{{- $pod := .k10_pod }} +{{- with .main }} +{{- $main_context := . }} +{{- $containerList := (dict "main" $main_context "k10_service_pod" $pod | include "get.serviceContainersInPod" | splitList " ") }} +{{- $needsInitContainersHeader := false }} +{{- range $skip, $service := $containerList }} +{{- $serviceStateful := has $service (dict "main" $main_context "k10_service_pod" $pod | include "get.statefulRestServicesInPod" | splitList " ") }} + {{- if and ( eq $service "auth" ) $main_context.Values.auth.ldap.enabled }} + {{- $needsInitContainersHeader = true }} + {{- else if $serviceStateful }} + {{- $needsInitContainersHeader = true }} + {{- end }}{{/* initContainers header needed check */}} +{{- end }}{{/* range $skip, $service := $containerList */}} +{{- if $needsInitContainersHeader }} + initContainers: +{{- end }} +{{- end }}{{/* with .main */}} +{{- end }}{{/* define "k10-init-container-header" */}} + +{{- define "k10-init-container" }} +{{- $pod := .k10_pod }} +{{- with .main }} +{{- $main_context := . 
}} +{{- $containerList := (dict "main" $main_context "k10_service_pod" $pod | include "get.serviceContainersInPod" | splitList " ") }} +{{- range $skip, $service := $containerList }} +{{- $serviceStateful := has $service (dict "main" $main_context "k10_service_pod" $pod | include "get.statefulRestServicesInPod" | splitList " ") }} +{{- if and ( eq $service "auth" ) $main_context.Values.auth.ldap.enabled }} + - name: dex-init + command: + - /dex/dexconfigmerge + args: + - --config-path=/etc/dex/cfg/config.yaml + - --secret-path=/var/run/secrets/kasten.io/bind-secret/bindPW + - --new-config-path=/dex-config/config.yaml + - --secret-field=bindPW + {{- dict "main" $main_context "k10_service" $service | include "serviceImage" | indent 8 }} + volumeMounts: + - mountPath: /etc/dex/cfg + name: config + - mountPath: /dex-config + name: dex-config + - name: bind-secret + mountPath: "/var/run/secrets/kasten.io/bind-secret" + readOnly: true +{{- else if $serviceStateful }} + - name: upgrade-init + securityContext: + runAsUser: 0 + allowPrivilegeEscalation: true + {{- dict "main" $main_context "k10_service" "upgrade" | include "serviceImage" | indent 8 }} + imagePullPolicy: {{ $main_context.Values.image.pullPolicy }} + env: + - name: MODEL_STORE_DIR + valueFrom: + configMapKeyRef: + name: k10-config + key: modelstoredirname + volumeMounts: + - name: {{ $service }}-persistent-storage + mountPath: {{ $main_context.Values.global.persistence.mountPath | quote }} +{{- if eq $service "catalog" }} + - name: schema-upgrade-check + {{- dict "main" $main_context "k10_service" $service | include "serviceImage" | indent 8 }} + imagePullPolicy: {{ $main_context.Values.image.pullPolicy }} + env: +{{- if $main_context.Values.clusterName }} + - name: CLUSTER_NAME + valueFrom: + configMapKeyRef: + name: k10-config + key: clustername +{{- end }} + - name: INIT_CONTAINER + value: "true" + - name: K10_RELEASE_NAME + value: {{ $main_context.Release.Name }} + - name: LOG_LEVEL + valueFrom: + 
configMapKeyRef: + name: k10-config + key: loglevel + - name: MODEL_STORE_DIR + valueFrom: + configMapKeyRef: + name: k10-config + key: modelstoredirname + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: VERSION + valueFrom: + configMapKeyRef: + name: k10-config + key: version + volumeMounts: + - name: {{ $service }}-persistent-storage + mountPath: {{ $main_context.Values.global.persistence.mountPath | quote }} +{{- end }}{{/* eq $service "catalog" */}} +{{- end }}{{/* initContainers definitions */}} +{{- end }}{{/* range $skip, $service := $containerList */}} +{{- end }}{{/* with .main */}} +{{- end }}{{/* define "k10-init-container" */}} diff --git a/charts/k10/k10/4.5.1400/templates/_k10_metering.tpl b/charts/k10/k10/4.5.1400/templates/_k10_metering.tpl new file mode 100644 index 000000000..5f3ecc1f3 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/_k10_metering.tpl @@ -0,0 +1,261 @@ +{{/* Generate service spec */}} +{{/* because of https://github.com/GoogleCloudPlatform/marketplace-k8s-app-tools/issues/165 +we have to start using .Values.reportingSecret instead +of correct version .Values.metering.reportingSecret */}} +{{- define "k10-metering" }} +{{ $service := .k10_service }} +{{- with .main }} +{{- if $.stateful }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: {{ .Release.Namespace }} + name: {{ $service }}-pv-claim + labels: +{{ include "helm.labels" . 
| indent 4 }} + component: {{ $service }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ default .Values.global.persistence.size (index .Values.global.persistence $service "size") }} +{{- if .Values.global.persistence.storageClass }} + {{- if (eq "-" .Values.global.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.global.persistence.storageClass }}" + {{- end }} +{{- end }} +--- +{{- end }}{{/* if $.stateful */}} +kind: ConfigMap +apiVersion: v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: {{ include "fullname" . }}-metering-config +data: + config: | +{{- if .Values.metering.reportingKey }} + identities: + - name: gcp + gcp: + encodedServiceAccountKey: {{ .Values.metering.reportingKey }} +{{- end }} + metrics: + - name: node_time + type: int + passthrough: {} + endpoints: + - name: on_disk +{{- if .Values.metering.reportingKey }} + - name: servicecontrol +{{- end }} + endpoints: + - name: on_disk + disk: +{{- if .Values.global.persistence.enabled }} + reportDir: /var/reports/ubbagent/reports +{{- else }} + reportDir: /tmp/reports/ubbagent/reports +{{- end }} + expireSeconds: 3600 +{{- if .Values.metering.reportingKey }} + - name: servicecontrol + servicecontrol: + identity: gcp + serviceName: kasten-k10.mp-kasten-public.appspot.com + consumerId: {{ .Values.metering.consumerId }} +{{- end }} +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: {{ .Release.Namespace }} + name: {{ $service }}-svc + labels: +{{ include "helm.labels" . | indent 4 }} + component: {{ $service }} +spec: + replicas: {{ $.replicas }} + strategy: + type: Recreate + selector: + matchLabels: +{{ include "k10.common.matchLabels" . | indent 6 }} + component: {{ $service }} + run: {{ $service }}-svc + template: + metadata: + annotations: + checksum/config: {{ include (print .Template.BasePath "/k10-config.yaml") . 
| sha256sum }} + checksum/secret: {{ include (print .Template.BasePath "/secrets.yaml") . | sha256sum }} + labels: +{{ include "helm.labels" . | indent 8 }} + component: {{ $service }} + run: {{ $service }}-svc + spec: + securityContext: +{{ toYaml .Values.services.securityContext | indent 8 }} + serviceAccountName: {{ template "meteringServiceAccountName" . }} + {{- include "k10.imagePullSecrets" . | indent 6 }} +{{- if $.stateful }} + initContainers: + - name: upgrade-init + securityContext: + runAsUser: 0 + allowPrivilegeEscalation: true + {{- dict "main" . "k10_service" "upgrade" | include "serviceImage" | indent 8 }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: MODEL_STORE_DIR + value: /var/reports/ + volumeMounts: + - name: {{ $service }}-persistent-storage + mountPath: /var/reports/ +{{- end }} + containers: + - name: {{ $service }}-svc + {{- dict "main" . "k10_service" $service | include "serviceImage" | indent 8 }} + imagePullPolicy: {{ .Values.image.pullPolicy }} +{{- if eq .Release.Namespace "default" }} +{{- $podName := (printf "%s-svc" $service) }} +{{- $containerName := (printf "%s-svc" $service) }} +{{- dict "main" . 
"k10_service_pod_name" $podName "k10_service_container_name" $containerName | include "k10.resource.request" | indent 8}} +{{- end }} + ports: + - containerPort: {{ .Values.service.externalPort }} + livenessProbe: + httpGet: + path: /v0/healthz + port: {{ .Values.service.externalPort }} + initialDelaySeconds: 90 + timeoutSeconds: 1 + env: + - name: VERSION + valueFrom: + configMapKeyRef: + name: k10-config + key: version +{{- if .Values.clusterName }} + - name: CLUSTER_NAME + valueFrom: + configMapKeyRef: + name: k10-config + key: clustername +{{- end }} + - name: LOG_LEVEL + valueFrom: + configMapKeyRef: + name: k10-config + key: loglevel + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace +{{- if .Values.useNamespacedAPI }} + - name: K10_API_DOMAIN + valueFrom: + configMapKeyRef: + name: k10-config + key: apiDomain +{{- end }} + - name: AGENT_CONFIG_FILE + value: /var/ubbagent/config.yaml + - name: AGENT_STATE_DIR +{{- if .Values.global.persistence.enabled }} + value: "/var/reports/ubbagent" +{{- else }} + value: "/tmp/reports/ubbagent" + - name: K10_REPORTING_DIR + value: "/tmp/reports/k10/syncV2" + - name: K10SYNCSTATUSDIR + value: "/tmp/reports/k10" + - name: GRACE_PERIOD_STORE + value: /tmp/reports/clustergraceperiod + - name: NODE_USAGE_STORE + value: /tmp/reports/node_usage_history +{{- end }} +{{- if eq "true" (include "overwite.kanisterToolsImage" .) 
}} + - name: KANISTER_TOOLS + valueFrom: + configMapKeyRef: + name: k10-config + key: overwriteKanisterTools +{{- end }} +{{- if .Values.metering.awsRegion }} + - name: AWS_REGION + value: {{ .Values.metering.awsRegion }} +{{- end }} +{{- if .Values.metering.mode }} + - name: K10REPORTMODE + value: {{ .Values.metering.mode }} +{{- end }} +{{- if .Values.metering.reportCollectionPeriod }} + - name: K10_REPORT_COLLECTION_PERIOD + value: {{ .Values.metering.reportCollectionPeriod | quote }} +{{- end }} +{{- if .Values.metering.reportPushPeriod }} + - name: K10_REPORT_PUSH_PERIOD + value: {{ .Values.metering.reportPushPeriod | quote }} +{{- end }} +{{- if .Values.metering.promoID }} + - name: K10_PROMOTION_ID + value: {{ .Values.metering.promoID }} +{{- end }} +{{- if .Values.reportingSecret }} + - name: AGENT_CONSUMER_ID + valueFrom: + secretKeyRef: + name: {{ .Values.reportingSecret }} + key: consumer-id + - name: AGENT_REPORTING_KEY + valueFrom: + secretKeyRef: + name: {{ .Values.reportingSecret }} + key: reporting-key + - name: K10_RELEASE_NAME + value: {{ .Release.Name }} +{{- end }} +{{- if .Values.metering.licenseConfigSecretName }} + - name: AWS_WEB_IDENTITY_REFRESH_TOKEN_FILE + value: "/var/run/secrets/product-license/license_token" + - name: AWS_ROLE_ARN + valueFrom: + secretKeyRef: + name: {{ .Values.metering.licenseConfigSecretName }} + key: iam_role +{{- end }} + volumeMounts: + - name: meter-config + mountPath: /var/ubbagent +{{- if $.stateful }} + - name: {{ $service }}-persistent-storage + mountPath: /var/reports/ +{{- end }} +{{- if .Values.metering.licenseConfigSecretName }} + - name: awsmp-product-license + mountPath: "/var/run/secrets/product-license" +{{- end }} + volumes: + - name: meter-config + configMap: + name: {{ include "fullname" . 
}}-metering-config + items: + - key: config + path: config.yaml +{{- if $.stateful }} + - name: {{ $service }}-persistent-storage + persistentVolumeClaim: + claimName: {{ $service }}-pv-claim +{{- end }} +{{- if .Values.metering.licenseConfigSecretName }} + - name: awsmp-product-license + secret: + secretName: {{ .Values.metering.licenseConfigSecretName }} +{{- end }} +--- +{{- end }}{{/* with .main */}} +{{- end }}{{/* define "k10-metering" */}} diff --git a/charts/k10/k10/4.5.1400/templates/_k10_serviceimage.tpl b/charts/k10/k10/4.5.1400/templates/_k10_serviceimage.tpl new file mode 100644 index 000000000..d9e69a8a4 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/_k10_serviceimage.tpl @@ -0,0 +1,51 @@ +{{/* +Helper to get k10 service image +The details on how these image are being generated +is in below issue +https://kasten.atlassian.net/browse/K10-4036 +Using substr to remove repo from ambassadorImage +*/}} +{{- define "serviceImage" -}} +{{/* +we are maintaining the field .Values.images to override it when +we install the chart for red hat marketplace. If we dont +have the value specified use earlier flow, if it is, use the +value that is specified. 
+*/}} +{{- if not .main.Values.global.rhMarketPlace }} +{{- $serviceImage := "" -}} +{{- $tagFromDefs := "" -}} +{{- if .main.Values.global.airgapped.repository }} +{{- $serviceImage = default .main.Chart.AppVersion .main.Values.image.tag | print .main.Values.global.airgapped.repository "/" .k10_service ":" }} +{{- else if contains .main.Values.image.registry .main.Values.image.repository }} +{{- $serviceImage = default .main.Chart.AppVersion .main.Values.image.tag | print .main.Values.image.repository "/" .k10_service ":" }} +{{- else }} +{{- $serviceImage = default .main.Chart.AppVersion .main.Values.image.tag | print .main.Values.image.registry "/" .main.Values.image.repository "/" .k10_service ":" }} +{{- end }}{{/* if .main.Values.global.airgapped.repository */}} +{{- $serviceImageKey := print (replace "-" "" .k10_service) "Image" }} +{{- if eq $serviceImageKey "ambassadorImage" }} +{{- $tagFromDefs = (include "k10.ambassadorImageTag" .) }} +{{- else if eq $serviceImageKey "dexImage" }} +{{- $tagFromDefs = (include "k10.dexImageTag" .) 
}} +{{- end }}{{/* if eq $serviceImageKey "ambassadorImage" */}} +{{- if index .main.Values $serviceImageKey }} +{{- $service_values := index .main.Values $serviceImageKey }} +{{- if .main.Values.global.airgapped.repository }} +{{ $valuesImage := (splitList "/" (index $service_values "image")) }} +{{- if $tagFromDefs }} +image: {{ printf "%s/%s:k10-%s" .main.Values.global.airgapped.repository (index $valuesImage (sub (len $valuesImage) 1) ) $tagFromDefs -}} +{{- end }} +{{- else }}{{/* .main.Values.global.airgapped.repository */}} +{{- if $tagFromDefs }} +image: {{ printf "%s:%s" (index $service_values "image") $tagFromDefs }} +{{- else }} +image: {{ index $service_values "image" }} +{{- end }} +{{- end }}{{/* .main.Values.global.airgapped.repository */}} +{{- else }} +image: {{ $serviceImage }} +{{- end -}}{{/* index .main.Values $serviceImageKey */}} +{{- else }} +image: {{ printf "%s" (get .main.Values.global.images .k10_service) }} +{{- end }}{{/* if not .main.Values.images.executor */}} +{{- end -}}{{/* define "serviceImage" */}} diff --git a/charts/k10/k10/4.5.1400/templates/_k10_template.tpl b/charts/k10/k10/4.5.1400/templates/_k10_template.tpl new file mode 100644 index 000000000..30a0ac977 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/_k10_template.tpl @@ -0,0 +1,190 @@ +{{/* Generate service spec */}} +{{- define "k10-default" }} +{{- $service := .k10_service }} +{{- with .main }} +{{- $main_context := . 
}} +{{- range $skip, $statefulContainer := compact (dict "main" $main_context "k10_service_pod" $service | include "get.statefulRestServicesInPod" | splitList " ") }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: {{ $main_context.Release.Namespace }} + name: {{ $statefulContainer }}-pv-claim + labels: +{{ include "helm.labels" $main_context | indent 4 }} + component: {{ $statefulContainer }} +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ default $main_context.Values.global.persistence.size (index $main_context.Values.global.persistence $statefulContainer "size") }} +{{- if $main_context.Values.global.persistence.storageClass }} + {{- if (eq "-" $main_context.Values.global.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ $main_context.Values.global.persistence.storageClass }}" + {{- end }} +{{- end }} +--- +{{- end }}{{/* if $.stateful */}} +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: {{ .Release.Namespace }} + name: {{ $service }}-svc + labels: +{{ include "helm.labels" . | indent 4 }} + component: {{ $service }} +spec: + replicas: {{ $.replicas }} + strategy: + type: Recreate + selector: + matchLabels: +{{ include "k10.common.matchLabels" . | indent 6 }} + component: {{ $service }} + run: {{ $service }}-svc + template: + metadata: + annotations: + checksum/config: {{ include (print .Template.BasePath "/k10-config.yaml") . | sha256sum }} + checksum/secret: {{ include (print .Template.BasePath "/secrets.yaml") . | sha256sum }} +{{- if .Values.auth.ldap.restartPod }} + rollme: {{ randAlphaNum 5 | quote }} +{{- end}} + labels: +{{ include "helm.labels" . 
| indent 8 }} + component: {{ $service }} + run: {{ $service }}-svc + spec: +{{- if eq $service "executor" }} +{{- if .Values.services.executor.hostNetwork }} + hostNetwork: true +{{- end }}{{/* .Values.services.executor.hostNetwork */}} +{{- end }}{{/* eq $service "executor" */}} +{{- if eq $service "aggregatedapis" }} +{{- if .Values.services.aggregatedapis.hostNetwork }} + hostNetwork: true +{{- end }}{{/* .Values.services.aggregatedapis.hostNetwork */}} +{{- end }}{{/* eq $service "aggregatedapis" */}} +{{- if eq $service "dashboardbff" }} +{{- if .Values.services.dashboardbff.hostNetwork }} + hostNetwork: true +{{- end }}{{/* .Values.services.dashboardbff.hostNetwork */}} +{{- end }}{{/* eq $service "dashboardbff" */}} + securityContext: +{{ toYaml .Values.services.securityContext | indent 8 }} + serviceAccountName: {{ template "serviceAccountName" . }} + {{- include "k10.imagePullSecrets" . | indent 6 }} +{{- /* initContainers: */}} +{{- (dict "main" . "k10_pod" $service | include "k10-init-container-header") }} +{{- (dict "main" . "k10_pod" $service | include "k10-init-container") }} +{{- /* containers: */}} +{{- (dict "main" . "k10_pod" $service | include "k10-containers") }} +{{- /* volumes: */}} +{{- (dict "main" . "k10_pod" $service | include "k10-deployment-volumes-header") }} +{{- (dict "main" . "k10_pod" $service | include "k10-deployment-volumes") }} +--- +{{- end }}{{/* with .main */}} +{{- end }}{{/* define "k10-default" */}} + +{{- define "k10-deployment-volumes-header" }} +{{- $pod := .k10_pod }} +{{- with .main }} +{{- $main_context := . 
}} +{{- $containerList := (dict "main" $main_context "k10_service_pod" $pod | include "get.serviceContainersInPod" | splitList " ") }} +{{- $needsVolumesHeader := false }} +{{- range $skip, $service := $containerList }} + {{- $serviceStateful := has $service (dict "main" $main_context "k10_service_pod" $pod | include "get.statefulRestServicesInPod" | splitList " ") }} + {{- if or $serviceStateful (or (eq (include "check.googlecreds" $main_context) "true") (eq $service "auth" "logging")) }} + {{- $needsVolumesHeader = true }} + {{- else if or (or (eq (include "basicauth.check" $main_context) "true") (or $main_context.Values.auth.oidcAuth.enabled (eq (include "check.dexAuth" $main_context) "true"))) $main_context.Values.features }} + {{- $needsVolumesHeader = true }} + {{- else if and (eq $service "config") ($main_context.Values.injectKanisterSidecar.enabled) }} + {{- $needsVolumesHeader = true }} + {{- else if eq (include "check.cacertconfigmap" $main_context) "true" }} + {{- $needsVolumesHeader = true }} + {{- else if and ( eq $service "auth" ) ( or $main_context.Values.auth.dex.enabled (eq (include "check.dexAuth" $main_context) "true")) }} + {{- $needsVolumesHeader = true }} + {{- end }}{{/* volumes header needed check */}} +{{- end }}{{/* range $skip, $service := $containerList */}} +{{- if $needsVolumesHeader }} + volumes: +{{- end }} +{{- end }}{{/* with .main */}} +{{- end }}{{/* define "k10-init-container-header" */}} + +{{- define "k10-deployment-volumes" }} +{{- $pod := .k10_pod }} +{{- with .main }} +{{- if .Values.features }} + - name: k10-features + configMap: + name: k10-features +{{- end }} +{{- if eq (include "basicauth.check" .) 
"true" }} + - name: k10-basic-auth + secret: + secretName: {{ default "k10-basic-auth" .Values.auth.basicAuth.secretName }} +{{- end }} +{{- if .Values.auth.oidcAuth.enabled }} + - name: k10-oidc-auth + secret: + secretName: {{ default "k10-oidc-auth" .Values.auth.oidcAuth.secretName }} +{{- end }} +{{- if .Values.auth.openshift.enabled }} + - name: k10-oidc-auth + secret: + secretName: {{ default "k10-oidc-auth" .Values.auth.openshift.secretName }} +{{- end }} +{{- if .Values.auth.ldap.enabled }} + - name: k10-oidc-auth + secret: + secretName: {{ default "k10-oidc-auth" .Values.auth.ldap.secretName }} + - name: k10-logos-dex + configMap: + name: k10-logos-dex +{{- end }} +{{- range $skip, $statefulContainer := compact (dict "main" . "k10_service_pod" $pod | include "get.statefulRestServicesInPod" | splitList " ") }} + - name: {{ $statefulContainer }}-persistent-storage + persistentVolumeClaim: + claimName: {{ $statefulContainer }}-pv-claim +{{- end }} +{{- if eq (include "check.googlecreds" .) "true" }} + - name: service-account + secret: + secretName: google-secret +{{- end }} +{{- if eq (include "check.cacertconfigmap" .) "true" }} + - name: {{ .Values.cacertconfigmap.name }} + configMap: + name: {{ .Values.cacertconfigmap.name }} +{{- end }} +{{- $containersInThisPod := (dict "main" . "k10_service_pod" $pod | include "get.serviceContainersInPod" | splitList " ") }} +{{- if has "logging" $containersInThisPod }} + - name: logging-configmap-storage + configMap: + name: fluentbit-configmap +{{- end }} +{{- if and (has "config" $containersInThisPod) (.Values.injectKanisterSidecar.enabled) }} + - name: mutating-webhook-certs + secret: + secretName: {{ include "k10.configAPIs" . }}-certs +{{- end }} +{{- if and ( has "auth" $containersInThisPod) (or .Values.auth.dex.enabled (eq (include "check.dexAuth" .) 
"true")) }} + - name: config + configMap: + name: k10-dex + items: + - key: config.yaml + path: config.yaml +{{- if .Values.auth.ldap.enabled }} + - name: dex-config + emptyDir: {} + - name: bind-secret + secret: + secretName: {{ default "k10-dex" .Values.auth.ldap.bindPWSecretName }} +{{- end }} +{{- end }} +{{- end }}{{/* with .main */}} +{{- end }}{{/* define "k10-init-container-header" */}} diff --git a/charts/k10/k10/4.5.1400/templates/api-tls-secrets.yaml b/charts/k10/k10/4.5.1400/templates/api-tls-secrets.yaml new file mode 100644 index 000000000..6c863f7c6 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/api-tls-secrets.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.secrets.apiTlsCrt .Values.secrets.apiTlsKey }} +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: ambassador-certs +type: kubernetes.io/tls +data: + tls.crt: {{ .Values.secrets.apiTlsCrt }} + tls.key: {{ .Values.secrets.apiTlsKey }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/templates/apiservice.yaml b/charts/k10/k10/4.5.1400/templates/apiservice.yaml new file mode 100644 index 000000000..1811df48a --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/apiservice.yaml @@ -0,0 +1,25 @@ +{{/* Template to generate the aggregated APIService/Service objects */}} +{{- if .Values.apiservices.deployed -}} +{{- $main := . -}} +{{- $container_port := .Values.service.internalPort -}} +{{- $namespace := .Release.Namespace -}} +{{- range include "k10.aggregatedAPIs" . | splitList " " -}} +--- +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: v1alpha1.{{ . }}.{{ template "apiDomain" $main }} + labels: + apiserver: "true" +{{ include "helm.labels" $ | indent 4 }} +spec: + version: v1alpha1 + group: {{ . 
}}.{{ template "apiDomain" $main }} + groupPriorityMinimum: 2000 + service: + namespace: {{$namespace}} + name: aggregatedapis-svc + versionPriority: 10 + insecureSkipTLSVerify: true +{{ end }} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/templates/daemonsets.yaml b/charts/k10/k10/4.5.1400/templates/daemonsets.yaml new file mode 100644 index 000000000..96f2d3a88 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/daemonsets.yaml @@ -0,0 +1,26 @@ +{{- if .Values.metering.redhatMarketplacePayg }} +apiVersion: apps/v1 +kind: DaemonSet +metadata: + namespace: {{ .Release.Namespace }} + name: k10-rhmp-paygo + labels: +{{ include "helm.labels" . | indent 4 }} + component: paygo +spec: + selector: + matchLabels: +{{ include "k10.common.matchLabels" . | indent 6 }} + component: paygo + template: + metadata: + labels: +{{ include "helm.labels" . | indent 8 }} + component: paygo + spec: + containers: + - name: paygo + image: registry.access.redhat.com/ubi8/ubi-minimal:8.5-240.1648458092 + command: [ "sleep" ] + args: [ "36500d" ] +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/templates/deployments.yaml b/charts/k10/k10/4.5.1400/templates/deployments.yaml new file mode 100644 index 000000000..53ac1c8b0 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/deployments.yaml @@ -0,0 +1,30 @@ +{{/* +Generates deployment specs for K10 services and other services such as +"frontend" and "kanister". +*/}} +{{- include "singleAuth.check" . -}} +{{- $main_context := . -}} +{{- $stateless_services := include "k10.statelessServices" . | splitList " " -}} +{{- $colocated_services := include "k10.colocatedServices" . | fromYaml -}} +{{- range $skip, $k10_service := include "k10.restServices" . 
| splitList " " }} + {{ if not (hasKey $colocated_services $k10_service ) }} + {{/* Set $stateful for stateful services when .Values.global.persistence.enabled is true */}} + {{- $stateful := and $.Values.global.persistence.enabled (not (has $k10_service $stateless_services)) -}} + {{/* Set $replicas to .Values.executorReplicas for the exectutor service */}} + {{- $replicas := or (and (eq $k10_service "executor") $.Values.executorReplicas) 1 -}} + {{ $tmp_contx := dict "main" $main_context "k10_service" $k10_service "stateful" $stateful "replicas" $replicas }} + {{ if eq $k10_service "metering" }} + {{- include "k10-metering" $tmp_contx -}} + {{ else }} + {{- include "k10-default" $tmp_contx -}} + {{ end }} + {{ end }}{{/* if not (hasKey $colocated_services $k10_service ) */}} +{{- end }} +{{/* +Generate deployment specs for additional services. These are stateless and have +1 replica. +*/}} +{{- range $skip, $k10_service := concat (include "k10.services" . | splitList " ") (include "k10.additionalServices" . | splitList " ") }} + {{ $tmp_contx := dict "main" $main_context "k10_service" $k10_service "stateful" false "replicas" 1 }} + {{- include "k10-default" $tmp_contx -}} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/templates/fluentbit-configmap.yaml b/charts/k10/k10/4.5.1400/templates/fluentbit-configmap.yaml new file mode 100644 index 000000000..71cecb966 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/fluentbit-configmap.yaml @@ -0,0 +1,34 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + labels: +{{ include "helm.labels" . 
| indent 4 }} + namespace: {{ .Release.Namespace }} + name: fluentbit-configmap +data: + fluentbit.conf: | + [SERVICE] + HTTP_Server On + HTTP_Listen 0.0.0.0 + HTTP_PORT 24225 + + [INPUT] + Name tcp + Listen 0.0.0.0 + Port 24224 + + [OUTPUT] + Name stdout + Match * + + [OUTPUT] + Name file + Match * + File {{ .Values.global.persistence.mountPath }}/k10.log + logrotate.conf: | + {{ .Values.global.persistence.mountPath }}/k10.log { + create + missingok + rotate 6 + size 1G + } diff --git a/charts/k10/k10/4.5.1400/templates/gateway-ext.yaml b/charts/k10/k10/4.5.1400/templates/gateway-ext.yaml new file mode 100644 index 000000000..1e21d3dba --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/gateway-ext.yaml @@ -0,0 +1,33 @@ +{{/* Externally exposed service for gateway endpoint. */}} +{{- $container_port := .Values.service.internalPort -}} +{{- if .Values.externalGateway.create -}} +{{- include "authEnabled.check" . -}} +apiVersion: v1 +kind: Service +metadata: + namespace: {{ $.Release.Namespace }} + name: gateway-ext + labels: + service: gateway + {{- if eq "route53-mapper" (default " " .Values.externalGateway.fqdn.type) }} + dns: route53 + {{- end }} +{{ include "helm.labels" . | indent 4 }} + annotations: + {{- if .Values.externalGateway.annotations }} +{{ toYaml .Values.externalGateway.annotations | indent 4 }} + {{- end }} +{{ include "dnsAnnotations" . 
| indent 4 }} + {{- if .Values.externalGateway.awsSSLCertARN }} + service.beta.kubernetes.io/aws-load-balancer-ssl-cert: {{ .Values.externalGateway.awsSSLCertARN }} + service.beta.kubernetes.io/aws-load-balancer-ssl-ports: https + {{- end }} +spec: + type: LoadBalancer + ports: + - name: https + port: {{ if or (and .Values.secrets.apiTlsCrt .Values.secrets.apiTlsKey) .Values.externalGateway.awsSSLCertARN }}443{{ else }}80{{ end }} + targetPort: {{ $container_port }} + selector: + service: gateway +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/templates/gateway.yaml b/charts/k10/k10/4.5.1400/templates/gateway.yaml new file mode 100644 index 000000000..7ff17fa9c --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/gateway.yaml @@ -0,0 +1,144 @@ +{{- $container_port := .Values.service.internalPort -}} +{{- $service_port := .Values.service.externalPort -}} +{{- $admin_port := default 8877 .Values.service.gatewayAdminPort -}} +--- +apiVersion: v1 +kind: Service +metadata: + namespace: {{ $.Release.Namespace }} + labels: + service: gateway +{{ include "helm.labels" . 
| indent 4 }} + name: gateway + annotations: + getambassador.io/config: | + --- + apiVersion: getambassador.io/v3alpha1 + kind: AuthService + name: authentication + auth_service: "auth-svc:8000" + path_prefix: "/v0/authz" + allowed_request_headers: + - "x-forwarded-access-token" + --- + apiVersion: getambassador.io/v3alpha1 + kind: Host + name: ambassadorhost + hostname: "*" +{{- if and .Values.secrets.apiTlsCrt .Values.secrets.apiTlsKey }} + tlsSecret: + name: ambassador-certs +{{- end }} + requestPolicy: + insecure: + action: Route + --- + apiVersion: getambassador.io/v3alpha1 + kind: Listener + name: ambassadorlistener + port: {{ $container_port }} + securityModel: XFP + protocol: HTTPS + hostBinding: + namespace: + from: SELF + --- +{{- if (eq "endpoint" .Values.apigateway.serviceResolver) }} + apiVersion: getambassador.io/v3alpha1 + kind: KubernetesEndpointResolver + name: endpoint + --- +{{- end }} + apiVersion: getambassador.io/v3alpha1 + kind: Module + name: ambassador + config: + service_port: {{ $container_port }} +{{- if (eq "endpoint" .Values.apigateway.serviceResolver) }} + resolver: endpoint + load_balancer: + policy: round_robin +{{- end }} +spec: + ports: + - name: http + port: {{ $service_port }} + targetPort: {{ $container_port }} + selector: + service: gateway +--- +{{- if .Values.gateway.exposeAdminPort }} +apiVersion: v1 +kind: Service +metadata: + namespace: {{ $.Release.Namespace }} + name: gateway-admin + labels: + service: gateway +{{ include "helm.labels" . | indent 4 }} +spec: + ports: + - name: metrics + port: {{ $admin_port }} + targetPort: {{ $admin_port }} + selector: + service: gateway +--- +{{- end }} +apiVersion: apps/v1 +kind: Deployment +metadata: + namespace: {{ $.Release.Namespace }} + labels: +{{ include "helm.labels" . 
| indent 4 }} + component: gateway + name: gateway +spec: + replicas: 1 + selector: + matchLabels: + service: gateway + template: + metadata: + annotations: + checksum/config: {{ include (print .Template.BasePath "/k10-config.yaml") . | sha256sum }} + checksum/secret: {{ include (print .Template.BasePath "/secrets.yaml") . | sha256sum }} + labels: + service: gateway + component: gateway +{{ include "helm.labels" . | indent 8 }} + spec: + serviceAccountName: {{ template "serviceAccountName" . }} + {{- include "k10.imagePullSecrets" . | indent 6 }} + containers: + - name: ambassador + image: {{ include "k10.ambImage" . }} + resources: + limits: + cpu: 1000m + memory: 1Gi + requests: + cpu: 200m + memory: 300Mi + env: + - name: AMBASSADOR_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: AMBASSADOR_SINGLE_NAMESPACE + value: "true" + - name: "AMBASSADOR_VERIFY_SSL_FALSE" + value: {{ .Values.gateway.insecureDisableSSLVerify | quote }} + livenessProbe: + httpGet: + path: /ambassador/v0/check_alive + port: {{ $admin_port }} + initialDelaySeconds: 30 + periodSeconds: 3 + readinessProbe: + httpGet: + path: /ambassador/v0/check_ready + port: {{ $admin_port }} + initialDelaySeconds: 30 + periodSeconds: 3 + restartPolicy: Always diff --git a/charts/k10/k10/4.5.1400/templates/grafana-scc.yaml b/charts/k10/k10/4.5.1400/templates/grafana-scc.yaml new file mode 100644 index 000000000..f634498a4 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/grafana-scc.yaml @@ -0,0 +1,44 @@ +{{- if .Values.scc.create }} +{{- if .Values.grafana.enabled }} +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + labels: +{{ include "helm.labels" . 
| indent 4 }} + name: {{ .Release.Name }}-grafana +allowPrivilegedContainer: false +allowHostNetwork: false +allowHostDirVolumePlugin: true +priority: null +allowedCapabilities: null +allowHostPorts: true +allowHostPID: false +allowHostIPC: false +readOnlyRootFilesystem: false +requiredDropCapabilities: + - KILL + - MKNOD + - SETUID + - SETGID +defaultAddCapabilities: [] +allowedCapabilities: [] +priority: 0 +runAsUser: + type: RunAsAny +seLinuxContext: + type: RunAsAny +fsGroup: + type: RunAsAny +supplementalGroups: + type: RunAsAny +volumes: + - configMap + - downwardAPI + - emptyDir + - persistentVolumeClaim + - projected + - secret +users: + - system:serviceaccount:{{.Release.Namespace}}:{{.Release.Name}}-grafana +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/templates/ingress.yaml b/charts/k10/k10/4.5.1400/templates/ingress.yaml new file mode 100644 index 000000000..48efc0530 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/ingress.yaml @@ -0,0 +1,46 @@ +{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} +{{- $service_port := .Values.service.externalPort -}} +{{ if .Values.ingress.create }} +{{ include "authEnabled.check" . }} +apiVersion: {{ template "ingress.apiVersion" . }} +kind: Ingress +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: {{ .Release.Name }}-ingress + annotations: +{{ include "ingressClassAnnotation" . 
| indent 4 }} + {{- if and .Values.secrets.apiTlsCrt .Values.secrets.apiTlsKey }} + nginx.ingress.kubernetes.io/secure-backends: "true" + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + {{- end }} + {{- if .Values.ingress.annotations }} +{{ toYaml .Values.ingress.annotations | indent 4 }} + {{- end }} +spec: +{{- if .Values.ingress.tls.enabled }} + tls: + - hosts: + - {{ required "ingress.host value is required for TLS configuration" .Values.ingress.host }} + secretName: {{ required "ingress.tls.secretName is required for TLS configuration" .Values.ingress.tls.secretName }} +{{- end }} + rules: + - http: + paths: + - path: /{{ default .Release.Name .Values.ingress.urlPath | trimPrefix "/" | trimSuffix "/" }}/ + pathType: {{ default "ImplementationSpecific" .Values.ingress.pathType }} + backend: + {{- if $ingressApiIsStable }} + service: + name: gateway + port: + number: {{ $service_port }} + {{- else }} + serviceName: gateway + servicePort: {{ $service_port }} + {{- end }} + {{- if .Values.ingress.host }} + host: {{ .Values.ingress.host }} + {{- end }} +{{ end }} diff --git a/charts/k10/k10/4.5.1400/templates/k10-config.yaml b/charts/k10/k10/4.5.1400/templates/k10-config.yaml new file mode 100644 index 000000000..2067261b2 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/k10-config.yaml @@ -0,0 +1,230 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: k10-config +data: + loglevel: {{ .Values.logLevel | quote }} + {{- if .Values.clusterName }} + clustername: {{ quote .Values.clusterName }} + {{- end }} + version: {{ .Chart.AppVersion }} + multiClusterVersion: {{ include "k10.multiClusterVersion" . | quote }} + modelstoredirname: "//mnt/k10state/kasten-io/" + apiDomain: {{ include "apiDomain" . }} + concurrentSnapConversions: {{ include "k10.defaultConcurrentSnapshotConversions" . 
| quote }} + concurrentWorkloadSnapshots: {{ include "k10.defaultConcurrentWorkloadSnapshots" . | quote }} + k10DataStoreParallelUpload: {{ include "k10.defaultK10DataStoreParallelUpload" . | quote }} + k10DataStoreGeneralContentCacheSizeMB: {{ include "k10.defaultK10DataStoreGeneralContentCacheSizeMB" . | quote }} + k10DataStoreGeneralMetadataCacheSizeMB: {{ include "k10.defaultK10DataStoreGeneralMetadataCacheSizeMB" . | quote }} + k10DataStoreRestoreContentCacheSizeMB: {{ include "k10.defaultK10DataStoreRestoreContentCacheSizeMB" . | quote }} + k10DataStoreRestoreMetadataCacheSizeMB: {{ include "k10.defaultK10DataStoreRestoreMetadataCacheSizeMB" . | quote }} + K10BackupBufferFileHeadroomFactor: {{ include "k10.defaultK10BackupBufferFileHeadroomFactor" . | quote }} + AWSAssumeRoleDuration: {{ default (include "k10.defaultAssumeRoleDuration" .) .Values.awsConfig.assumeRoleDuration | quote }} + KanisterBackupTimeout: {{ default (include "k10.defaultKanisterBackupTimeout" .) .Values.kanister.backupTimeout | quote }} + KanisterRestoreTimeout: {{ default (include "k10.defaultKanisterRestoreTimeout" .) .Values.kanister.restoreTimeout | quote }} + KanisterDeleteTimeout: {{ default (include "k10.defaultKanisterDeleteTimeout" .) .Values.kanister.deleteTimeout | quote }} + KanisterHookTimeout: {{ default (include "k10.defaultKanisterHookTimeout" .) .Values.kanister.hookTimeout | quote }} + KanisterCheckRepoTimeout: {{ default (include "k10.defaultKanisterCheckRepoTimeout" .) .Values.kanister.checkRepoTimeout | quote }} + KanisterStatsTimeout: {{ default (include "k10.defaultKanisterStatsTimeout" .) .Values.kanister.statsTimeout | quote }} + KanisterEFSPostRestoreTimeout: {{ default (include "k10.defaultKanisterEFSPostRestoreTimeout" .) 
.Values.kanister.efsPostRestoreTimeout | quote }} + KanisterPodReadyWaitTimeout: {{ .Values.kanister.podReadyWaitTimeout | quote }} + K10MutatingWebhookTLSCertDir: "/etc/ssl/certs/webhook" + + K10LimiterGenericVolumeSnapshots: {{ default (include "k10.defaultK10LimiterGenericVolumeSnapshots" .) .Values.limiter.genericVolumeSnapshots | quote }} + K10LimiterGenericVolumeCopies: {{ default (include "k10.defaultK10LimiterGenericVolumeCopies" .) .Values.limiter.genericVolumeCopies | quote }} + K10LimiterGenericVolumeRestores: {{ default (include "k10.defaultK10LimiterGenericVolumeRestores" .) .Values.limiter.genericVolumeRestores | quote }} + K10LimiterCsiSnapshots: {{ default (include "k10.defaultK10LimiterCsiSnapshots" .) .Values.limiter.csiSnapshots | quote }} + K10LimiterProviderSnapshots: {{ default (include "k10.defaultK10LimiterProviderSnapshots" .) .Values.limiter.providerSnapshots | quote }} + + {{- if .Values.awsConfig.efsBackupVaultName }} + efsBackupVaultName: {{ quote .Values.awsConfig.efsBackupVaultName }} + {{- end }} + + {{- if .Values.vmWare.taskTimeoutMin }} + vmWareTaskTimeoutMin: {{ quote .Values.vmWare.taskTimeoutMin }} + {{- end }} + +{{- include "get.kanisterPodCustomLabels" . | indent 2}} +{{- include "get.kanisterPodCustomAnnotations" . | indent 2}} + + {{- if .Values.kanisterFunctionVersion }} + kanisterFunctionVersion: {{ .Values.kanisterFunctionVersion | quote }} + {{- else }} + kanisterFunctionVersion: {{ quote "v1.0.0-alpha" }} + {{- end }} + {{- if eq "true" (include "overwite.kanisterToolsImage" .) }} + {{- if (include "get.kanisterToolsImage" .) }} + overwriteKanisterTools: {{ include "get.kanisterToolsImage" .}} + {{- end }} + {{- end }} +{{- include "kanisterToolsResources" . | indent 2 }} + +{{ if .Values.features }} +--- +kind: ConfigMap +apiVersion: v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: k10-features +data: +{{ include "k10.features" . 
| indent 2}} +{{ end }} +{{ if .Values.auth.dex.enabled }} +--- +kind: ConfigMap +apiVersion: v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: k10-dex + namespace: {{ .Release.Namespace }} +data: + config.yaml: | + issuer: {{ .Values.auth.oidcAuth.providerURL }} + storage: + type: memory + web: + http: 0.0.0.0:8080 + logger: + level: info + format: text + connectors: + - type: oidc + id: google + name: Google + config: + issuer: {{ .Values.auth.dex.providerURL }} + clientID: {{ .Values.auth.oidcAuth.clientID }} + clientSecret: {{ .Values.auth.oidcAuth.clientSecret }} + redirectURI: {{ .Values.auth.dex.redirectURL }} + scopes: + - openid + - profile + - email + oauth2: + skipApprovalScreen: true + staticClients: + - name: 'K10' + id: {{ .Values.auth.oidcAuth.clientID }} + secret: {{ .Values.auth.oidcAuth.clientSecret }} + redirectURIs: + - {{ printf "%s/k10/auth-svc/v0/oidc/redirect" .Values.auth.oidcAuth.redirectURL }} + enablePasswordDB: true + staticPasswords: +{{ end }} +{{ if .Values.auth.openshift.enabled }} +--- +kind: ConfigMap +apiVersion: v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: k10-dex + namespace: {{ .Release.Namespace }} +data: + config.yaml: | + issuer: {{ printf "%s/dex" (trimSuffix "/" .Values.auth.openshift.dashboardURL) }} + storage: + type: memory + web: + http: 0.0.0.0:8080 + logger: + level: info + format: text + connectors: + - type: openshift + id: openshift + name: OpenShift + config: + issuer: {{ .Values.auth.openshift.openshiftURL }} + clientID: {{printf "system:serviceaccount:%s:%s" .Release.Namespace .Values.auth.openshift.serviceAccount }} + clientSecret: {{ .Values.auth.openshift.clientSecret }} + redirectURI: {{ printf "%s/dex/callback" (trimSuffix "/" .Values.auth.openshift.dashboardURL) }} + insecureCA: {{ .Values.auth.openshift.insecureCA }} +{{- if and (eq (include "check.cacertconfigmap" .) 
"false") .Values.auth.openshift.useServiceAccountCA }} + rootCA: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt +{{- end }} + oauth2: + skipApprovalScreen: true + staticClients: + - name: 'K10' + id: kasten + secret: kastensecret + redirectURIs: + - {{ printf "%s/auth-svc/v0/oidc/redirect" (trimSuffix "/" .Values.auth.openshift.dashboardURL) }} +{{ end }} +{{ if .Values.auth.ldap.enabled }} +--- +kind: ConfigMap +apiVersion: v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: k10-dex + namespace: {{ .Release.Namespace }} +data: + config.yaml: | + issuer: {{ printf "%s/dex" (trimSuffix "/" .Values.auth.ldap.dashboardURL) }} + storage: + type: memory + web: + http: 0.0.0.0:8080 + frontend: + theme: custom + logoURL: theme/kasten-logo.svg + logger: + level: info + format: text + connectors: + - type: ldap + id: ldap + name: LDAP + config: + host: {{ .Values.auth.ldap.host }} + insecureNoSSL: {{ .Values.auth.ldap.insecureNoSSL }} + insecureSkipVerify: {{ .Values.auth.ldap.insecureSkipVerifySSL }} + startTLS: {{ .Values.auth.ldap.startTLS }} + bindDN: {{ .Values.auth.ldap.bindDN }} + bindPW: BIND_PASSWORD_PLACEHOLDER + userSearch: + baseDN: {{ .Values.auth.ldap.userSearch.baseDN }} + filter: {{ .Values.auth.ldap.userSearch.filter }} + username: {{ .Values.auth.ldap.userSearch.username }} + idAttr: {{ .Values.auth.ldap.userSearch.idAttr }} + emailAttr: {{ .Values.auth.ldap.userSearch.emailAttr }} + nameAttr: {{ .Values.auth.ldap.userSearch.nameAttr }} + preferredUsernameAttr: {{ .Values.auth.ldap.userSearch.preferredUsernameAttr }} + groupSearch: + baseDN: {{ .Values.auth.ldap.groupSearch.baseDN }} + filter: {{ .Values.auth.ldap.groupSearch.filter }} + nameAttr: {{ .Values.auth.ldap.groupSearch.nameAttr }} +{{- with .Values.auth.ldap.groupSearch.userMatchers }} + userMatchers: +{{ toYaml . 
| indent 10 }} +{{- end }} + oauth2: + skipApprovalScreen: true + staticClients: + - name: 'K10' + id: kasten + secret: kastensecret + redirectURIs: + - {{ printf "%s/auth-svc/v0/oidc/redirect" (trimSuffix "/" .Values.auth.ldap.dashboardURL) }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: k10-logos-dex + namespace: {{ .Release.Namespace }} +binaryData: + {{- $files := .Files }} + {{- range tuple "files/favicon.png" "files/kasten-logo.svg" "files/styles.css" }} + {{ trimPrefix "files/" . }}: |- + {{ $files.Get . | b64enc }} + {{- end }} +{{ end }} diff --git a/charts/k10/k10/4.5.1400/templates/k10-eula.yaml b/charts/k10/k10/4.5.1400/templates/k10-eula.yaml new file mode 100644 index 000000000..21e251d6c --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/k10-eula.yaml @@ -0,0 +1,21 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: k10-eula +data: + text: {{ .Files.Get "eula.txt" | quote }} +--- +{{ if .Values.eula.accept }} +kind: ConfigMap +apiVersion: v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: k10-eula-info +data: +{{ include "k10.eula.fields" . | indent 2 }} +{{ end }} diff --git a/charts/k10/k10/4.5.1400/templates/kopia-tls-certs.yaml b/charts/k10/k10/4.5.1400/templates/kopia-tls-certs.yaml new file mode 100644 index 000000000..ac0635f51 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/kopia-tls-certs.yaml @@ -0,0 +1,33 @@ +# alternate names of the services. 
This renders to: [ component-svc.namespace, component-svc.namespace.svc ] +{{- $altNamesKopia := list ( printf "%s-svc.%s" "data-mover" .Release.Namespace ) ( printf "%s-svc.%s.svc" "data-mover" .Release.Namespace ) }} +# generate ca cert with 365 days of validity +{{- $caKopia := genCA ( printf "%s-svc-ca" "data-mover" ) 365 }} +# generate cert with CN="component-svc", SAN=$altNames and with 365 days of validity +{{- $certKopia := genSignedCert ( printf "%s-svc" "data-mover" ) nil $altNamesKopia 365 $caKopia }} +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: kopia-tls-cert + labels: +{{ include "helm.labels" . | indent 4 }} +{{- if .Values.global.rhMarketPlace }} + annotations: + "helm.sh/hook": "pre-install" +{{- end }} +data: + tls.crt: {{ $certKopia.Cert | b64enc }} +--- +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: kopia-tls-key + labels: +{{ include "helm.labels" . | indent 4 }} +{{- if .Values.global.rhMarketPlace }} + annotations: + "helm.sh/hook": "pre-install" +{{- end }} +data: + tls.key: {{ $certKopia.Key | b64enc }} diff --git a/charts/k10/k10/4.5.1400/templates/license.yaml b/charts/k10/k10/4.5.1400/templates/license.yaml new file mode 100644 index 000000000..f409fb7e5 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/license.yaml @@ -0,0 +1,25 @@ +{{- if not ( or ( .Values.license ) ( .Values.metering.awsMarketplace ) ( .Values.metering.awsManagedLicense ) ( .Values.metering.licenseConfigSecretName ) ) }} +{{- if .Files.Get "triallicense" }} +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: k10-trial-license +type: Opaque +data: + license: {{ print (.Files.Get "triallicense") }} +{{- end }} +{{- end }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: k10-license +type: Opaque +data: + license: {{ include "k10.getlicense" . 
}} diff --git a/charts/k10/k10/4.5.1400/templates/mutatingwebhook.yaml b/charts/k10/k10/4.5.1400/templates/mutatingwebhook.yaml new file mode 100644 index 000000000..36d7da875 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/mutatingwebhook.yaml @@ -0,0 +1,51 @@ +{{- if .Values.injectKanisterSidecar.enabled -}} +# alternate names of the services. This renders to: [ component-svc.namespace, component-svc.namespace.svc ] +{{- $altNames := list ( printf "%s-svc.%s" (include "k10.configAPIs" .) .Release.Namespace ) ( printf "%s-svc.%s.svc" (include "k10.configAPIs" .) .Release.Namespace ) }} +# generate ca cert with 365 days of validity +{{- $ca := genCA ( printf "%s-svc-ca" (include "k10.configAPIs" .) ) 365 }} +# generate cert with CN="component-svc", SAN=$altNames and with 365 days of validity +{{- $cert := genSignedCert ( printf "%s-svc" (include "k10.configAPIs" .) ) nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +type: kubernetes.io/tls +metadata: + name: {{ include "k10.configAPIs" . }}-certs + labels: +{{ include "helm.labels" . | indent 4 }} +data: + tls.crt: {{ $cert.Cert | b64enc }} + tls.key: {{ $cert.Key | b64enc }} +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + labels: +{{ include "helm.labels" . 
| indent 4 }} + namespace: {{ .Release.Namespace }} + name: k10-sidecar-injector +webhooks: +- name: k10-sidecar-injector.kasten.io + admissionReviewVersions: ["v1", "v1beta1"] + failurePolicy: Ignore + sideEffects: None + clientConfig: + service: + name: config-svc + namespace: {{ .Release.Namespace }} + path: "/k10/mutate" + port: 443 + caBundle: {{ b64enc $ca.Cert }} + rules: + - operations: ["CREATE", "UPDATE"] + apiGroups: ["*"] + apiVersions: ["v1"] + resources: ["deployments", "statefulsets", "deploymentconfigs"] +{{- if .Values.injectKanisterSidecar.namespaceSelector }} + namespaceSelector: +{{ toYaml .Values.injectKanisterSidecar.namespaceSelector | indent 4 }} +{{- end }} +{{- if .Values.injectKanisterSidecar.objectSelector }} + objectSelector: +{{ toYaml .Values.injectKanisterSidecar.objectSelector | indent 4 }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/templates/networkpolicy.yaml b/charts/k10/k10/4.5.1400/templates/networkpolicy.yaml new file mode 100644 index 000000000..2cd4dae9f --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/networkpolicy.yaml @@ -0,0 +1,192 @@ +{{- $admin_port := default 8877 .Values.service.gatewayAdminPort -}} +{{- $mutating_webhook_port := default 8080 .Values.injectKanisterSidecar.webhookServer.port -}} +{{- if .Values.networkPolicy.create }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: default-deny + namespace: {{ .Release.Namespace }} + labels: +{{ include "helm.labels" . | indent 4 }} +spec: + podSelector: {} + policyTypes: + - Ingress +--- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: cross-services-allow + namespace: {{ .Release.Namespace }} + labels: +{{ include "helm.labels" . 
| indent 4 }} +spec: + podSelector: + matchLabels: + release: {{ .Release.Name }} + ingress: + - from: + - podSelector: + matchLabels: + release: {{ .Release.Name }} + ports: + - protocol: TCP + port: {{ .Values.service.externalPort }} +--- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: logging-allow-internal + namespace: {{ .Release.Namespace }} + labels: +{{ include "helm.labels" . | indent 4 }} +spec: + podSelector: + matchLabels: + release: {{ .Release.Name }} + run: logging-svc + ingress: + - from: + - podSelector: + matchLabels: + release: {{ .Release.Name }} + ports: + # Logging input port + - protocol: TCP + port: 24224 + - protocol: TCP + port: 24225 +--- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: allow-external + namespace: {{ .Release.Namespace }} + labels: +{{ include "helm.labels" . | indent 4 }} +spec: + podSelector: + matchLabels: + service: gateway + release: {{ .Release.Name }} + ingress: + - from: [] + ports: + - protocol: TCP + port: 8000 +--- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: allow-all-api + namespace: {{ .Release.Namespace }} + labels: +{{ include "helm.labels" . | indent 4 }} +spec: + podSelector: + matchLabels: + run: aggregatedapis-svc + release: {{ .Release.Name }} + ingress: + - from: + ports: + - protocol: TCP + port: {{ .Values.service.aggregatedApiPort }} +{{- if .Values.gateway.exposeAdminPort }} +--- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: allow-gateway-admin + namespace: {{ .Release.Namespace }} + labels: +{{ include "helm.labels" . 
| indent 4 }} +spec: + podSelector: + matchLabels: + release: {{ .Release.Name }} + service: gateway + ingress: + - from: + - podSelector: + matchLabels: + app: prometheus + component: server + release: {{ .Release.Name }} + ports: + - protocol: TCP + port: {{ $admin_port }} +{{- end -}} +{{- if .Values.injectKanisterSidecar.enabled }} +--- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: allow-mutating-webhook + namespace: {{ .Release.Namespace }} + labels: +{{ include "helm.labels" . | indent 4 }} +spec: + podSelector: + matchLabels: + release: {{ .Release.Name }} + run: config-svc + ingress: + - from: + ports: + - protocol: TCP + port: {{ $mutating_webhook_port }} +{{- end -}} +{{- if or .Values.auth.dex.enabled (eq (include "check.dexAuth" .) "true") }} +--- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: gateway-dex-allow + namespace: {{ .Release.Namespace }} + labels: +{{ include "helm.labels" . | indent 4 }} +spec: + podSelector: + matchLabels: + release: {{ .Release.Name }} + run: auth-svc + ingress: + - from: + - podSelector: + matchLabels: + service: gateway + release: {{ .Release.Name }} + ports: + - protocol: TCP + port: 8080 +{{- end -}} +{{- $mainCtx := . }} +{{- $colocatedList := include "get.enabledColocatedSvcList" . | fromYaml }} +{{- range $primary, $secondaryList := $colocatedList }} +--- +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ $primary }}-svc-allow-secondary-services + namespace: {{ $mainCtx.Release.Namespace }} + labels: +{{ include "helm.labels" $mainCtx | indent 4 }} +spec: + podSelector: + matchLabels: + release: {{ $mainCtx.Release.Name }} + run: {{ $primary }}-svc + ingress: + - from: + - podSelector: + matchLabels: + release: {{ $mainCtx.Release.Name }} + ports: + {{- range $skip, $secondary := $secondaryList }} + {{- $colocConfig := index (include "k10.colocatedServices" . 
| fromYaml) $secondary }} + - protocol: TCP + port: {{ $colocConfig.port }} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/templates/prometheus-configmap.yaml b/charts/k10/k10/4.5.1400/templates/prometheus-configmap.yaml new file mode 100644 index 000000000..55c44c96d --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/prometheus-configmap.yaml @@ -0,0 +1,70 @@ +{{ $scrape_services := (include "k10.restServices" . | splitList " " ) }} +{{- if .Values.gateway.exposeAdminPort -}} + {{- $scrape_services = append (include "k10.restServices" . | splitList " " ) "gateway" -}} +{{- end -}} + +{{- include "check.validateMonitoringProperties" .}} +{{- if .Values.prometheus.server.enabled -}} +{{- $rbac := .Values.prometheus.rbac.create -}} +kind: ConfigMap +apiVersion: v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: {{ .Release.Name }}-{{ .Values.prometheus.server.configMapOverrideName }} +data: + prometheus.yml: | + global: + scrape_interval: 1m + scrape_timeout: 10s + evaluation_interval: 1m + scrape_configs: +{{- range $scrape_services -}} +{{- if or (not (hasKey $.Values.optionalColocatedServices .)) (index $.Values.optionalColocatedServices .).enabled }} +{{ $tmpcontx := dict "main" $ "k10service" . -}} +{{ include "k10.prometheusScrape" $tmpcontx | indent 6 -}} +{{- end }} +{{- end }} +{{- range include "k10.services" . | splitList " " }} +{{- if (or (ne . "aggregatedapis") ($rbac)) }} +{{ $tmpcontx := dict "main" $ "k10service" . -}} +{{ include "k10.prometheusScrape" $tmpcontx | indent 6 -}} +{{- end }} +{{- end }} +{{- range include "k10.additionalServices" . | splitList " " }} +{{- if not (eq . "frontend") }} +{{ $tmpcontx := dict "main" $ "k10service" . 
-}} +{{ include "k10.prometheusScrape" $tmpcontx | indent 6 -}} +{{- end }} +{{- end }} +{{- if .Values.prometheus.extraScrapeConfigs }} +{{ .Values.prometheus.extraScrapeConfigs | indent 6 }} +{{- end -}} +{{- if .Values.prometheus.scrapeCAdvisor }} + - job_name: 'kubernetes-cadvisor' + scheme: https + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + kubernetes_sd_configs: + - role: node + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor +{{- end}} + - job_name: prometheus + metrics_path: {{ .Values.prometheus.server.baseURL }}metrics + static_configs: + - targets: + - "localhost:9090" + labels: + app: prometheus + component: server +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/templates/prometheus-service.yaml b/charts/k10/k10/4.5.1400/templates/prometheus-service.yaml new file mode 100644 index 000000000..c916472c3 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/prometheus-service.yaml @@ -0,0 +1,45 @@ +{{/* Template to generate service spec for v0 rest services */}} +{{- if .Values.prometheus.server.enabled -}} +{{- $postfix := default .Release.Name .Values.ingress.urlPath -}} +{{- $os_postfix := default .Release.Name .Values.route.path -}} +{{- $service_port := .Values.prometheus.server.service.servicePort -}} +apiVersion: v1 +kind: Service +metadata: + namespace: {{ .Release.Namespace }} + name: {{ include "k10.prometheus.service.name" . }}-exp + labels: +{{ include "helm.labels" $ | indent 4 }} + component: {{ include "k10.prometheus.service.name" . }} + run: {{ include "k10.prometheus.service.name" . 
}} + annotations: + getambassador.io/config: | + --- + apiVersion: getambassador.io/v3alpha1 + kind: Mapping + name: {{ include "k10.prometheus.service.name" . }}-mapping + {{- if .Values.prometheus.server.baseURL }} + rewrite: /{{ .Values.prometheus.server.baseURL | trimPrefix "/" | trimSuffix "/" }}/ + {{- else }} + rewrite: / + {{- end }} + {{- if .Values.route.enabled }} + prefix: /{{ $os_postfix | trimPrefix "/" | trimSuffix "/" }}/prometheus/ + {{- else }} + prefix: /{{ $postfix | trimPrefix "/" | trimSuffix "/" }}/prometheus/ + {{- end }} + service: {{ include "k10.prometheus.service.name" . }}:{{ $service_port }} + timeout_ms: 15000 + hostname: "*" + +spec: + ports: + - name: http + protocol: TCP + port: {{ $service_port }} + targetPort: 9090 + selector: + app: {{ include "k10.prometheus.name" . }} + component: {{ .Values.prometheus.server.name }} + release: {{ .Release.Name }} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/templates/rbac.yaml b/charts/k10/k10/4.5.1400/templates/rbac.yaml new file mode 100644 index 000000000..2b510067d --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/rbac.yaml @@ -0,0 +1,239 @@ +{{- $main := . -}} +{{- $apiDomain := include "apiDomain" . -}} + +{{- $actionsAPIs := splitList " " (include "k10.actionsAPIs" .) -}} +{{- $aggregatedAPIs := splitList " " (include "k10.aggregatedAPIs" .) -}} +{{- $appsAPIs := splitList " " (include "k10.appsAPIs" .) -}} +{{- $authAPIs := splitList " " (include "k10.authAPIs" .) -}} +{{- $configAPIs := splitList " " (include "k10.configAPIs" .) -}} +{{- $distAPIs := splitList " " (include "k10.distAPIs" .) -}} +{{- $reportingAPIs := splitList " " (include "k10.reportingAPIs" .) -}} + +{{- if .Values.rbac.create }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: {{ .Release.Namespace }}-{{ template "serviceAccountName" . 
}}-cluster-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: {{ template "serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- if not ( eq (include "meteringServiceAccountName" .) (include "serviceAccountName" .) )}} +- kind: ServiceAccount + name: {{ template "meteringServiceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} +{{ include "k10.defaultRBACLabels" . | indent 4 }} + name: {{ .Release.Name }}-admin +rules: +- apiGroups: +{{- range sortAlpha (concat $aggregatedAPIs $configAPIs $reportingAPIs) }} + - {{ . }}.{{ $apiDomain }} +{{- end }} + resources: + - "*" + verbs: + - "*" +- apiGroups: + - cr.kanister.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - "" + resources: + - namespaces + verbs: + - create + - get + - list +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} +{{ include "k10.defaultRBACLabels" . | indent 4 }} + name: {{ .Release.Name }}-ns-admin + namespace: {{ .Release.Namespace }} +rules: +- apiGroups: + - "" + - "apps" + resources: + - deployments + - pods + verbs: + - get + - list +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - update +- apiGroups: + - "batch" + resources: + - jobs + verbs: + - get +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} +{{ include "k10.defaultRBACLabels" . | indent 4 }} + name: {{ .Release.Name }}-mc-admin +rules: +- apiGroups: +{{- range sortAlpha (concat $authAPIs $configAPIs $distAPIs) }} + - {{ . 
}}.{{ $apiDomain }} +{{- end }} + resources: + - "*" + verbs: + - "*" +- apiGroups: + - "" + resources: + - secrets + verbs: + - "*" +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} +{{ include "k10.defaultRBACLabels" . | indent 4 }} + name: {{ .Release.Name }}-basic +rules: +- apiGroups: +{{- range sortAlpha $actionsAPIs }} + - {{ . }}.{{ $apiDomain }} +{{- end }} + resources: + - {{ include "k10.backupActions" $main}} + - {{ include "k10.backupActionsDetails" $main}} + - {{ include "k10.restoreActions" $main}} + - {{ include "k10.restoreActionsDetails" $main}} + - {{ include "k10.exportActions" $main}} + - {{ include "k10.exportActionsDetails" $main}} + - {{ include "k10.cancelActions" $main}} + verbs: + - "*" +- apiGroups: +{{- range sortAlpha $appsAPIs }} + - {{ . }}.{{ $apiDomain }} +{{- end }} + resources: + - {{ include "k10.restorePoints" $main}} + - {{ include "k10.restorePointsDetails" $main}} + - {{ include "k10.applications" $main}} + - {{ include "k10.applicationsDetails" $main}} + verbs: + - "*" +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get +- apiGroups: +{{- range sortAlpha $configAPIs }} + - {{ . }}.{{ $apiDomain }} +{{- end }} + resources: + - {{ include "k10.policies" $main}} + verbs: + - "*" +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} +{{ include "k10.defaultRBACLabels" . | indent 4 }} + name: {{ .Release.Name }}-config-view +rules: +- apiGroups: +{{- range sortAlpha $configAPIs }} + - {{ . }}.{{ $apiDomain }} +{{- end }} + resources: + - {{ include "k10.profiles" $main}} + - {{ include "k10.policies" $main}} + verbs: + - get + - list +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: {{ .Release.Namespace }}-{{ template "serviceAccountName" . 
}}-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Release.Name }}-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: k10:admins +{{- range .Values.auth.k10AdminUsers }} +- apiGroup: rbac.authorization.k8s.io + kind: User + name: {{ . }} +{{- end }} +{{- range default .Values.auth.groupAllowList .Values.auth.k10AdminGroups }} +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: {{ . }} +{{- end }} +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: {{ .Release.Namespace }}-{{ template "serviceAccountName" . }}-ns-admin + namespace: {{ .Release.Namespace }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ .Release.Name }}-ns-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: k10:admins +{{- range .Values.auth.k10AdminUsers }} +- apiGroup: rbac.authorization.k8s.io + kind: User + name: {{ . }} +{{- end }} +{{- range default .Values.auth.groupAllowList .Values.auth.k10AdminGroups }} +- apiGroup: rbac.authorization.k8s.io + kind: Group + name: {{ . }} +{{- end }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/templates/route.yaml b/charts/k10/k10/4.5.1400/templates/route.yaml new file mode 100644 index 000000000..1ecd244be --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/route.yaml @@ -0,0 +1,36 @@ +{{- $route := .Values.route -}} +{{- if $route.enabled -}} +{{ include "authEnabled.check" . }} +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + name: {{ .Release.Name }}-route + {{- with $route.annotations }} + namespace: {{ .Release.Namespace }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + labels: +{{ include "helm.labels" . | indent 4 }} + {{- with $route.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + host: {{ $route.host }} + path: /{{ default .Release.Name $route.path | trimPrefix "/" | trimSuffix "/" }}/ + port: + targetPort: http + to: + kind: Service + name: gateway + weight: 100 + {{- if $route.tls.enabled }} + tls: + {{- if $route.tls.insecureEdgeTerminationPolicy }} + insecureEdgeTerminationPolicy: {{ $route.tls.insecureEdgeTerminationPolicy }} + {{- end }} + {{- if $route.tls.termination }} + termination: {{ $route.tls.termination }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/charts/k10/k10/4.5.1400/templates/scc.yaml b/charts/k10/k10/4.5.1400/templates/scc.yaml new file mode 100644 index 000000000..df12af4e3 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/scc.yaml @@ -0,0 +1,43 @@ +{{- if .Values.scc.create }} +kind: SecurityContextConstraints +apiVersion: security.openshift.io/v1 +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: {{ .Release.Name }}-prometheus-server +allowPrivilegedContainer: false +allowHostNetwork: false +allowHostDirVolumePlugin: true +priority: null +allowedCapabilities: null +allowHostPorts: true +allowHostPID: false +allowHostIPC: false +readOnlyRootFilesystem: false +requiredDropCapabilities: +- CHOWN +- KILL +- MKNOD +- SETUID +- SETGID +defaultAddCapabilities: [] +allowedCapabilities: [] +priority: 0 +runAsUser: + type: MustRunAsNonRoot +seLinuxContext: + type: RunAsAny +fsGroup: + type: RunAsAny +supplementalGroups: + type: RunAsAny +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- projected +- secret +users: + - system:serviceaccount:{{.Release.Namespace}}:prometheus-server +{{- end }} diff --git a/charts/k10/k10/4.5.1400/templates/secrets.yaml b/charts/k10/k10/4.5.1400/templates/secrets.yaml new file mode 100644 index 000000000..ac309e717 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/secrets.yaml @@ -0,0 +1,242 @@ +{{- include "enforce.singlecloudcreds" . -}} +{{- include "check.validateImagePullSecrets" . 
-}} +{{- if eq (include "check.awscreds" . ) "true" }} +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: aws-creds +type: Opaque +data: + aws_access_key_id: {{ required "secrets.awsAccessKeyId field is required!" .Values.secrets.awsAccessKeyId | b64enc | quote }} + aws_secret_access_key: {{ required "secrets.awsSecretAccessKey field is required!" .Values.secrets.awsSecretAccessKey | b64enc | quote }} +{{- if .Values.secrets.awsIamRole }} + role: {{ .Values.secrets.awsIamRole | trim | b64enc | quote }} +{{- end }} +{{- end }} +{{- if or .Values.secrets.dockerConfig .Values.secrets.dockerConfigPath }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: k10-ecr +type: kubernetes.io/dockerconfigjson +data: + .dockerconfigjson: {{ or .Values.secrets.dockerConfig ( .Values.secrets.dockerConfigPath | b64enc ) }} +{{- end }} +{{- if eq (include "check.googlecreds" .) "true" }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: google-secret +type: Opaque +data: + kasten-gke-sa.json: {{ .Values.secrets.googleApiKey }} +{{- end }} +{{- if eq (include "check.ibmslcreds" .) "true" }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: ibmsl-secret +type: Opaque +data: + ibm_sl_key: {{ required "secrets.ibmSoftLayerApiKey field is required!" .Values.secrets.ibmSoftLayerApiKey | b64enc | quote }} + ibm_sl_username: {{ required "secrets.ibmSoftLayerApiUsername field is required!" .Values.secrets.ibmSoftLayerApiUsername | b64enc | quote }} +{{- end }} +{{- if eq (include "check.azurecreds" .) "true" }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . 
| indent 4 }} + namespace: {{ .Release.Namespace }} + name: azure-creds +type: Opaque +data: + azure_tenant_id: {{ required "secrets.azureTenantId field is required!" .Values.secrets.azureTenantId | b64enc | quote }} + azure_client_id: {{ required "secrets.azureClientId field is required!" .Values.secrets.azureClientId | b64enc | quote }} + azure_client_secret: {{ required "secrets.azureClientSecret field is required!" .Values.secrets.azureClientSecret | b64enc | quote }} + azure_resource_group: {{ default "" .Values.secrets.azureResourceGroup | b64enc | quote }} + azure_subscription_id: {{ default "" .Values.secrets.azureSubscriptionID | b64enc | quote }} + azure_resource_manager_endpoint: {{ default "" .Values.secrets.azureResourceMgrEndpoint | b64enc | quote }} + azure_ad_endpoint: {{ default "" .Values.secrets.azureADEndpoint | b64enc | quote }} + azure_ad_resource_id: {{ default "" .Values.secrets.azureADResourceID | b64enc | quote }} + azure_cloud_env_id: {{ default "" .Values.secrets.azureCloudEnvID | b64enc | quote }} +{{- end }} +{{- if eq (include "check.vspherecreds" .) "true" }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + namespace: {{ .Release.Namespace }} + name: vsphere-creds +type: Opaque +data: + vsphere_endpoint: {{ required "secrets.vsphereEndpoint field is required!" .Values.secrets.vsphereEndpoint | b64enc | quote }} + vsphere_username: {{ required "secrets.vsphereUsername field is required!" .Values.secrets.vsphereUsername | b64enc | quote }} + vsphere_password: {{ required "secrets.vspherePassword field is required!" .Values.secrets.vspherePassword | b64enc | quote }} +{{- end }} +{{- if and (eq (include "basicauth.check" .) "true") (not .Values.auth.basicAuth.secretName) }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . 
| indent 4 }} + name: k10-basic-auth + namespace: {{ .Release.Namespace }} +data: + auth: {{ required "auth.basicAuth.htpasswd field is required!" .Values.auth.basicAuth.htpasswd | b64enc | quote}} +type: Opaque +{{- end }} +{{- if .Values.auth.tokenAuth.enabled }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: k10-token-auth + namespace: {{ .Release.Namespace }} +data: + auth: {{ "true" | b64enc | quote}} +type: Opaque +{{- end }} +{{- if and .Values.auth.oidcAuth.enabled (not .Values.auth.oidcAuth.secretName) }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: k10-oidc-auth + namespace: {{ .Release.Namespace }} +data: + provider-url: {{ required "auth.oidcAuth.providerURL field is required!" .Values.auth.oidcAuth.providerURL | b64enc | quote }} + redirect-url: {{ required "auth.oidcAuth.redirectURL field is required!" .Values.auth.oidcAuth.redirectURL | b64enc | quote }} + client-id: {{ required "auth.oidcAuth.clientID field is required!" .Values.auth.oidcAuth.clientID | b64enc | quote }} + client-secret: {{ required "auth.oidcAuth.clientSecret field is required!" .Values.auth.oidcAuth.clientSecret | b64enc | quote }} + scopes: {{ required "auth.oidcAuth.scopes field is required!" .Values.auth.oidcAuth.scopes | b64enc | quote }} + prompt: {{ default "select_account" .Values.auth.oidcAuth.prompt | b64enc | quote }} + usernameClaim: {{ default "sub" .Values.auth.oidcAuth.usernameClaim | b64enc | quote }} + usernamePrefix: {{ default "" .Values.auth.oidcAuth.usernamePrefix | b64enc | quote }} + groupClaim: {{ default "" .Values.auth.oidcAuth.groupClaim | b64enc | quote }} + groupPrefix: {{ default "" .Values.auth.oidcAuth.groupPrefix | b64enc | quote }} +stringData: + groupAllowList: |- +{{- range $.Values.auth.groupAllowList }} + {{ . 
-}} +{{ end }} + logout-url: {{ default "" .Values.auth.oidcAuth.logoutURL | b64enc | quote }} +type: Opaque +{{- end }} +{{- if and .Values.auth.openshift.enabled }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: k10-oidc-auth + namespace: {{ .Release.Namespace }} +data: + provider-url: {{ required "auth.openshift.dashboardURL field is required!" (printf "%s/dex" (trimSuffix "/" .Values.auth.openshift.dashboardURL)) | b64enc | quote }} + {{- if .Values.route.enabled }} + redirect-url: {{ required "auth.openshift.dashboardURL field is required!" (trimSuffix "/" (trimSuffix (default .Release.Name .Values.route.path) (trimSuffix "/" .Values.auth.openshift.dashboardURL))) | b64enc | quote }} + {{- else }} + redirect-url: {{ required "auth.openshift.dashboardURL field is required!" (trimSuffix "/" (trimSuffix (default .Release.Name .Values.ingress.urlPath) (trimSuffix "/" .Values.auth.openshift.dashboardURL))) | b64enc | quote }} + {{- end }} + client-id: {{ (printf "kasten") | b64enc | quote }} + client-secret: {{ (printf "kastensecret") | b64enc | quote }} + scopes: {{ (printf "groups profile email") | b64enc | quote }} + prompt: {{ (printf "select_account") | b64enc | quote }} + usernameClaim: {{ default "email" .Values.auth.openshift.usernameClaim | b64enc | quote }} + usernamePrefix: {{ default "" .Values.auth.openshift.usernamePrefix | b64enc | quote }} + groupClaim: {{ default "groups" .Values.auth.openshift.groupClaim | b64enc | quote }} + groupPrefix: {{ default "" .Values.auth.openshift.groupPrefix | b64enc | quote }} +stringData: + groupAllowList: |- +{{- range $.Values.auth.groupAllowList }} + {{ . -}} +{{ end }} +type: Opaque +{{- end }} +{{- if and .Values.auth.ldap.enabled (not .Values.auth.ldap.secretName) }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . 
| indent 4 }} + name: k10-oidc-auth + namespace: {{ .Release.Namespace }} +data: + provider-url: {{ required "auth.ldap.dashboardURL field is required!" (printf "%s/dex" (trimSuffix "/" .Values.auth.ldap.dashboardURL)) | b64enc | quote }} + {{- if .Values.route.enabled }} + redirect-url: {{ required "auth.ldap.dashboardURL field is required!" (trimSuffix "/" (trimSuffix (default .Release.Name .Values.route.path) (trimSuffix "/" .Values.auth.ldap.dashboardURL))) | b64enc | quote }} + {{- else }} + redirect-url: {{ required "auth.ldap.dashboardURL field is required!" (trimSuffix "/" (trimSuffix (default .Release.Name .Values.ingress.urlPath) (trimSuffix "/" .Values.auth.ldap.dashboardURL))) | b64enc | quote }} + {{- end }} + client-id: {{ (printf "kasten") | b64enc | quote }} + client-secret: {{ (printf "kastensecret") | b64enc | quote }} + scopes: {{ (printf "groups profile email") | b64enc | quote }} + prompt: {{ (printf "select_account") | b64enc | quote }} + usernameClaim: {{ default "email" .Values.auth.ldap.usernameClaim | b64enc | quote }} + usernamePrefix: {{ default "" .Values.auth.ldap.usernamePrefix | b64enc | quote }} + groupClaim: {{ default "groups" .Values.auth.ldap.groupClaim | b64enc | quote }} + groupPrefix: {{ default "" .Values.auth.ldap.groupPrefix | b64enc | quote }} +stringData: + groupAllowList: |- +{{- range $.Values.auth.groupAllowList }} + {{ . -}} +{{ end }} +type: Opaque +{{- end }} +{{- if and .Values.auth.ldap.enabled (not .Values.auth.ldap.bindPWSecretName) }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . | indent 4 }} + name: k10-dex + namespace: {{ .Release.Namespace }} +data: + bindPW: {{ required "auth.ldap.bindPW field is required!" .Values.auth.ldap.bindPW | b64enc | quote }} +type: Opaque +{{- end }} +{{- if eq (include "check.primaryKey" . ) "true" }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: +{{ include "helm.labels" . 
| indent 4 }} + name: k10-encryption-primary-key + namespace: {{ .Release.Namespace }} +data: + {{- if .Values.encryption.primaryKey.awsCmkKeyId }} + awscmkkeyid: {{ default "" .Values.encryption.primaryKey.awsCmkKeyId | trim | b64enc | quote }} + {{- end }} + {{- if .Values.encryption.primaryKey.vaultTransitKeyName }} + vaulttransitkeyname: {{ default "" .Values.encryption.primaryKey.vaultTransitKeyName | trim | b64enc | quote }} + vaulttransitpath: {{ default "transit" .Values.encryption.primaryKey.vaultTransitPath | trim | b64enc | quote }} + {{- end }} +type: Opaque +{{- end }} diff --git a/charts/k10/k10/4.5.1400/templates/serviceaccount.yaml b/charts/k10/k10/4.5.1400/templates/serviceaccount.yaml new file mode 100644 index 000000000..a7704e4e6 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/serviceaccount.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.serviceAccount.create ( not .Values.metering.awsMarketplace ) ( not .Values.metering.awsManagedLicense ) }} +kind: ServiceAccount +apiVersion: v1 +metadata: +{{- if .Values.secrets.awsIamRole }} + annotations: + eks.amazonaws.com/role-arn: {{ .Values.secrets.awsIamRole }} +{{- end }} + labels: +{{ include "helm.labels" . | indent 4 }} + name: {{ template "serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} +{{- if and (not ( eq (include "meteringServiceAccountName" .) (include "serviceAccountName" .))) ( not .Values.metering.awsManagedLicense ) .Values.metering.serviceAccount.create }} +--- +kind: ServiceAccount +apiVersion: v1 +metadata: +{{- if .Values.metering.awsMarketPlaceIamRole }} + annotations: + eks.amazonaws.com/role-arn: {{ .Values.metering.awsMarketPlaceIamRole }} +{{- end }} + labels: +{{ include "helm.labels" . | indent 4 }} + name: {{ template "meteringServiceAccountName" . 
}} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/charts/k10/k10/4.5.1400/templates/v0services.yaml b/charts/k10/k10/4.5.1400/templates/v0services.yaml new file mode 100644 index 000000000..c62017da3 --- /dev/null +++ b/charts/k10/k10/4.5.1400/templates/v0services.yaml @@ -0,0 +1,165 @@ +{{/* Template to generate service spec for v0 rest services */}} +{{- $container_port := .Values.service.internalPort -}} +{{- $service_port := .Values.service.externalPort -}} +{{- $aggregated_api_port := .Values.service.aggregatedApiPort -}} +{{- $postfix := default .Release.Name .Values.ingress.urlPath -}} +{{- $colocated_services := include "k10.colocatedServices" . | fromYaml -}} +{{- $exposed_services := include "k10.exposedServices" . | splitList " " -}} +{{- $os_postfix := default .Release.Name .Values.route.path -}} +{{- $main_context := . -}} +{{- range append (include "k10.restServices" . | splitList " ") "frontend" }} + {{ if not (hasKey $colocated_services . ) }} +apiVersion: v1 +kind: Service +metadata: + namespace: {{ $.Release.Namespace }} + name: {{ . }}-svc + labels: +{{ include "helm.labels" $ | indent 4 }} + component: {{ . }} + run: {{ . }}-svc +{{ if or (has . $exposed_services) (eq . "frontend") }} + annotations: + getambassador.io/config: | + --- + apiVersion: getambassador.io/v3alpha1 + kind: Mapping + name: {{ . }}-mapping + {{- if $.Values.route.enabled }} + {{- if eq . "frontend" }} + prefix: /{{ $os_postfix | trimPrefix "/" | trimSuffix "/" }}/ + {{- else }} + prefix: /{{ $os_postfix | trimPrefix "/" | trimSuffix "/" }}/{{ . }}-svc/ + {{- end }} + {{- else }} + {{- if eq . "frontend" }} + prefix: /{{ $postfix | trimPrefix "/" | trimSuffix "/" }}/ + {{- else }} + prefix: /{{ $postfix | trimPrefix "/" | trimSuffix "/" }}/{{ . }}-svc/ + {{- end }} + {{- end }} + rewrite: / + service: {{ . 
}}-svc.{{ $.Release.Namespace }}:{{ $service_port }} + timeout_ms: 30000 + hostname: "*" +{{- $colocatedList := include "get.enabledColocatedSvcList" $main_context | fromYaml }} +{{- range $skip, $secondary := index $colocatedList . }} + {{- $colocConfig := index (include "k10.colocatedServices" . | fromYaml) $secondary }} + {{- if $colocConfig.isExposed }} + --- + apiVersion: getambassador.io/v3alpha1 + kind: Mapping + name: {{ $secondary }}-mapping + prefix: /{{ $postfix }}/{{ $secondary }}-svc/ + rewrite: / + service: {{ $colocConfig.primary }}-svc.{{ $.Release.Namespace }}:{{ $colocConfig.port }} + timeout_ms: 30000 + hostname: "*" + {{- end }} +{{- end }} +{{- end }} +spec: + ports: + - name: http + protocol: TCP + port: {{ $service_port }} + targetPort: {{ $container_port }} +{{- $colocatedList := include "get.enabledColocatedSvcList" $main_context | fromYaml }} +{{- range $skip, $secondary := index $colocatedList . }} + {{- $colocConfig := index (include "k10.colocatedServices" . | fromYaml) $secondary }} + - name: {{ $secondary }} + protocol: TCP + port: {{ $colocConfig.port }} + targetPort: {{ $colocConfig.port }} +{{- end }} +{{- if eq . "logging" }} + - name: logging + protocol: TCP + port: 24224 + targetPort: 24224 + - name: logging-metrics + protocol: TCP + port: 24225 + targetPort: 24225 +{{- end }} + selector: + run: {{ . }}-svc +--- + {{ end }}{{/* if not (hasKey $colocated_services $k10_service ) */}} +{{ end -}}{{/* range append (include "k10.restServices" . | splitList " ") "frontend" */}} +{{- range append (include "k10.services" . | splitList " ") "kanister" }} +apiVersion: v1 +kind: Service +metadata: + namespace: {{ $.Release.Namespace }} + name: {{ . }}-svc + labels: +{{ include "helm.labels" $ | indent 4 }} + component: {{ . }} + run: {{ . }}-svc +spec: + ports: + {{- if eq . 
"aggregatedapis" }} + - name: http + port: 443 + protocol: TCP + targetPort: {{ $aggregated_api_port }} + {{- else }} + - name: http + protocol: TCP + port: {{ $service_port }} + targetPort: {{ $container_port }} + {{- end }} + {{- if and (eq . "config") ($.Values.injectKanisterSidecar.enabled) }} + - name: https + protocol: TCP + port: 443 + targetPort: {{ $.Values.injectKanisterSidecar.webhookServer.port }} + {{- end }} +{{- $colocatedList := include "get.enabledColocatedSvcList" $main_context | fromYaml }} +{{- range $skip, $secondary := index $colocatedList . }} + {{- $colocConfig := index (include "k10.colocatedServices" . | fromYaml) $secondary }} + - name: {{ $secondary }} + protocol: TCP + port: {{ $colocConfig.port }} + targetPort: {{ $colocConfig.port }} +{{- end }} + selector: + run: {{ . }}-svc +--- +{{ end -}} +{{- if or .Values.auth.dex.enabled (eq (include "check.dexAuth" .) "true") }} +apiVersion: v1 +kind: Service +metadata: + annotations: + getambassador.io/config: | + --- + apiVersion: getambassador.io/v3alpha1 + kind: Mapping + name: dex-mapping + {{- if $.Values.route.enabled }} + prefix: /{{ $os_postfix | trimPrefix "/" | trimSuffix "/" }}/dex/ + {{- else }} + prefix: /{{ $postfix | trimPrefix "/" | trimSuffix "/" }}/dex/ + {{- end }} + rewrite: "" + service: dex.{{ $.Release.Namespace }}:8000 + timeout_ms: 30000 + hostname: "*" + name: dex + namespace: {{ $.Release.Namespace }} + labels: +{{ include "helm.labels" $ | indent 4 }} + component: dex + run: auth-svc +spec: + ports: + - name: http + port: {{ $service_port }} + protocol: TCP + targetPort: 8080 + selector: + run: auth-svc + type: ClusterIP +{{ end -}} diff --git a/charts/k10/k10/4.5.1400/triallicense b/charts/k10/k10/4.5.1400/triallicense new file mode 100644 index 000000000..cfe6dd46b --- /dev/null +++ b/charts/k10/k10/4.5.1400/triallicense @@ -0,0 +1 @@ 
+Y3VzdG9tZXJOYW1lOiB0cmlhbHN0YXJ0ZXItbGljZW5zZQpkYXRlRW5kOiAnMjEwMC0wMS0wMVQwMDowMDowMC4wMDBaJwpkYXRlU3RhcnQ6ICcyMDIwLTAxLTAxVDAwOjAwOjAwLjAwMFonCmZlYXR1cmVzOgogIHRyaWFsOiBudWxsCmlkOiB0cmlhbC0wOWY4MzE5Zi0xODBmLTRhOTAtOTE3My1kOTJiNzZmMTgzNWUKcHJvZHVjdDogSzEwCnJlc3RyaWN0aW9uczoKICBub2RlczogNTAwCnNlcnZpY2VBY2NvdW50S2V5OiBudWxsCnZlcnNpb246IHYxLjAuMApzaWduYXR1cmU6IEYxbnVLUFV5STJtbDJGMmV1VHdGOXNZRTZMVU5rQ3ZiR2tTV1ZkT0ZqdERCb1B6SjUyVWFsVkFmRjVmQUxpcm5BcVhkcERnYi9YcnpxSEYrTE0xS2pEMVdXUFd0ZUdXNFc1anBPSFN0T296Y0c5M0pUUHF5M2l6TVk3RmczZVFLYTZzWDhBdnFwOXArWXVBMWNwTENlQ2dsR2dnOTVzSUFmYmRMMTBmV2d2RmR6QUt4dUZLN2psRzVtbG1CRVF5R0hrYWdoZFIrVGxzeUNTNEFkbXVBOEZodVUwZnRBdXN0b1M3R2JKd1BuTFI3STFZY1Q4OW8wU2xRZEJ2Yjg2QzdKbm1OdnY0aHhiSUo5TTJvWGJPSnQ4ZnBNcjhNWFR6YWRMTWJzSndhZ3VBVHlNUWF2cExHNXRPb0U2ZE1uMVlFVDZLdWZiYy9NdThVRDVYYXlDYTdkZz09Cg== diff --git a/charts/k10/k10/4.5.1400/values.schema.json b/charts/k10/k10/4.5.1400/values.schema.json new file mode 100644 index 000000000..0437e8d1b --- /dev/null +++ b/charts/k10/k10/4.5.1400/values.schema.json @@ -0,0 +1,1089 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema", + "type": "object", + "properties": { + "serviceAccount": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + } + } + }, + "image": { + "type": "object", + "properties": { + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "image": { + "type": "string" + }, + "tag": { + "type": "string" + }, + "pullPolicy": { + "type": "string" + } + } + }, + "scc": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + } + } + }, + "networkPolicy": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + } + } + }, + "global": { + "type": "object", + "properties": { + "airgapped": { + "type": "object", + "properties": { + "repository": { + "type": "string" + } + } + }, + "persistence": { + "type": "object", + "properties": { + "mountPath": { + "type": 
"string" + }, + "enabled": { + "type": "boolean" + }, + "storageClass": { + "type": "string" + }, + "accessMode": { + "type": "string" + }, + "size": { + "type": "string" + }, + "metering": { + "type": "object", + "properties": { + "size": { + "type": "string" + } + } + }, + "catalog": { + "type": "object", + "properties": { + "size": { + "type": "string" + } + } + }, + "jobs": { + "type": "object", + "properties": { + "size": { + "type": "string" + } + } + }, + "logging": { + "type": "object", + "properties": { + "size": { + "type": "string" + } + } + } + } + }, + "upstreamCertifiedImages": { + "type": "boolean" + }, + "rhMarketPlace": { + "type": "boolean" + }, + "images": { + "type": "object", + "properties": { + "aggregatedapis": { + "type": "string" + }, + "auth": { + "type": "string" + }, + "catalog": { + "type": "string" + }, + "config": { + "type": "string" + }, + "crypto": { + "type": "string" + }, + "dashboardbff": { + "type": "string" + }, + "executor": { + "type": "string" + }, + "frontend": { + "type": "string" + }, + "jobs": { + "type": "string" + }, + "kanister": { + "type": "string" + }, + "logging": { + "type": "string" + }, + "metering": { + "type": "string" + }, + "state": { + "type": "string" + }, + "ambassador": { + "type": "string" + }, + "prometheus": { + "type": "string" + }, + "configmap-reload": { + "type": "string" + }, + "dex": { + "type": "string" + }, + "kanister-tools": { + "type": "string" + }, + "upgrade": { + "type": "string" + }, + "cephtool": { + "type": "string" + }, + "datamover": { + "type": "string" + } + } + } + } + }, + "metering": { + "type": "object", + "properties": { + "reportingKey": { + "type": "string" + }, + "consumerId": { + "type": "string" + }, + "awsMarketPlaceIamRole": { + "type": "string" + }, + "awsRegion": { + "type": "string" + }, + "serviceAccount": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + } + } + }, + "licenseConfigSecretName": { + 
"type": "string" + }, + "mode": { + "type": "string" + }, + "reportCollectionPeriod": { + "type": "integer" + }, + "reportPushPeriod": { + "type": "integer" + }, + "promoID": { + "type": "string" + } + } + }, + "route": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "host": { + "type": "string" + }, + "path": { + "type": "string" + }, + "annotations": { + "type": "object" + }, + "labels": { + "type": "object" + }, + "tls": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "insecureEdgeTerminationPolicy": { + "type": "string" + }, + "termination": { + "type": "string" + } + } + } + } + }, + "toolsImage": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "pullPolicy": { + "type": "string" + } + } + }, + "ambassadorImage": { + "type": "object", + "properties": { + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "image": { + "type": "string" + } + } + }, + "dexImage": { + "type": "object", + "properties": { + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "image": { + "type": "string" + } + } + }, + "kanisterToolsImage": { + "type": "object", + "properties": { + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "image": { + "type": "string" + }, + "pullPolicy": { + "type": "string" + } + } + }, + "eula": { + "type": "object", + "properties": { + "accept": { + "type": "boolean" + } + } + }, + "license": { + "type": "string" + }, + "prometheus": { + "type": "object", + "properties": { + "k10image": { + "type": "object", + "properties": { + "registry": { + "type": "string" + }, + "repository": { + "type": "string" + } + } + }, + "initChownData": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "rbac": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + } + } + }, + "alertmanager": { + "type": "object", + 
"properties": { + "enabled": { + "type": "boolean" + } + } + }, + "kubeStateMetrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "networkPolicy": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "nodeExporter": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "pushgateway": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "scrapeCAdvisor": { + "type": "boolean" + }, + "server": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "securityContext": { + "type": "object", + "properties": { + "runAsUser": { + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsGroup": { + "type": "integer" + }, + "fsGroup": { + "type": "integer" + } + } + }, + "retention": { + "type": "string" + }, + "strategy": { + "type": "object", + "properties": { + "rollingUpdate": { + "type": "object", + "properties": { + "maxSurge": { + "type": "string" + }, + "maxUnavailable": { + "type": "string" + } + } + }, + "type": { + "type": "string" + } + } + }, + "persistentVolume": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "storageClass": { + "type": "string" + } + } + }, + "configMapOverrideName": { + "type": "string" + }, + "fullnameOverride": { + "type": "string" + }, + "baseURL": { + "type": "string" + }, + "prefixURL": { + "type": "string" + } + } + }, + "serviceAccounts": { + "type": "object", + "properties": { + "alertmanager": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + } + } + }, + "kubeStateMetrics": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + } + } + }, + "nodeExporter": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + } + } + }, + "pushgateway": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + } + } + }, + 
"server": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + } + } + } + } + } + } + }, + "service": { + "type": "object", + "properties": { + "externalPort": { + "type": "integer" + }, + "internalPort": { + "type": "integer" + }, + "aggregatedApiPort": { + "type": "integer" + }, + "gatewayAdminPort": { + "type": "integer" + } + } + }, + "secrets": { + "type": "object", + "properties": { + "awsAccessKeyId": { + "type": "string" + }, + "awsSecretAccessKey": { + "type": "string" + }, + "awsIamRole": { + "type": "string" + }, + "googleApiKey": { + "type": "string" + }, + "dockerConfig": { + "type": "string" + }, + "dockerConfigPath": { + "type": "string" + }, + "azureTenantId": { + "type": "string" + }, + "azureClientId": { + "type": "string" + }, + "azureClientSecret": { + "type": "string" + }, + "azureResourceGroup": { + "type": "string" + }, + "azureSubscriptionID": { + "type": "string" + }, + "azureResourceMgrEndpoint": { + "type": "string" + }, + "azureADEndpoint": { + "type": "string" + }, + "azureADResourceID": { + "type": "string" + }, + "apiTlsCrt": { + "type": "string" + }, + "apiTlsKey": { + "type": "string" + }, + "ibmSoftLayerApiKey": { + "type": "string" + }, + "ibmSoftLayerApiUsername": { + "type": "string" + }, + "vsphereEndpoint": { + "type": "string" + }, + "vsphereUsername": { + "type": "string" + }, + "vspherePassword": { + "type": "string" + } + } + }, + "clusterName": { + "type": "string" + }, + "executorReplicas": { + "type": "integer" + }, + "logLevel": { + "type": "string" + }, + "apiservices": { + "type": "object", + "properties": { + "deployed": { + "type": "boolean" + } + } + }, + "injectKanisterSidecar": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "namespaceSelector": { + "type": "object", + "properties": { + "matchLabels": { + "type": "object" + } + } + }, + "objectSelector": { + "type": "object", + "properties": { + "matchLabels": { + "type": "object" + } + } + }, + 
"webhookServer": { + "type": "object", + "properties": { + "port": { + "type": "integer" + } + } + } + } + }, + "kanisterPodCustomLabels": { + "type": "string" + }, + "kanisterPodCustomAnnotations": { + "type": "string" + }, + "resources": { + "type": "object" + }, + "services": { + "type": "object", + "properties": { + "executor": { + "type": "object", + "properties": { + "hostNetwork": { + "type": "boolean" + } + } + }, + "dashboardbff": { + "type": "object", + "properties": { + "hostNetwork": { + "type": "boolean" + } + } + }, + "securityContext": { + "type": "object", + "properties": { + "runAsUser": { + "type": "integer" + }, + "fsGroup": { + "type": "integer" + } + } + } + } + }, + "apigateway": { + "type": "object", + "properties": { + "serviceResolver": { + "type": "string" + } + } + }, + "limiter": { + "type": "object", + "properties": { + "genericVolumeSnapshots": { + "type": "integer" + }, + "genericVolumeCopies": { + "type": "integer" + }, + "genericVolumeRestores": { + "type": "integer" + }, + "csiSnapshots": { + "type": "integer" + }, + "providerSnapshots": { + "type": "integer" + } + } + }, + "gateway": { + "type": "object", + "properties": { + "insecureDisableSSLVerify": { + "type": "boolean" + } + } + }, + "kanisterWithKopia": { + "type": "boolean" + }, + "ingress": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + }, + "tls": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "class": { + "type": "string" + }, + "host": { + "type": "string" + }, + "urlPath": { + "type": "string" + } + } + }, + "genericVolumeSnapshot": { + "type": "object", + "properties": { + "resources": { + "type": "object", + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string" + }, + "cpu": { + "type": "string" + } + } + }, + "limits": { + "type": "object", + "properties": { + "memory": { + "type": "string" + }, + "cpu": { + "type": "string" + } + } + } + } + } + 
} + } + }, + "jaeger": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "agentDNS": { + "type": "string" + } + } + }, + "cacertconfigmap": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + } + }, + "externalGateway": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + }, + "annotations": { + "type": "object" + }, + "fqdn": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "awsSSLCertARN": { + "type": "string" + } + } + }, + "auth": { + "type": "object", + "properties": { + "groupAllowList": { + "type": "array", + "items": { + "type": "string" + } + }, + "basicAuth": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "secretName": { + "type": "string" + }, + "htpasswd": { + "type": "string" + } + } + }, + "tokenAuth": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "oidcAuth": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "providerURL": { + "type": "string" + }, + "redirectURL": { + "type": "string" + }, + "scopes": { + "type": "string" + }, + "prompt": { + "type": "string" + }, + "clientID": { + "type": "string" + }, + "clientSecret": { + "type": "string" + }, + "usernameClaim": { + "type": "string" + }, + "usernamePrefix": { + "type": "string" + }, + "groupClaim": { + "type": "string" + }, + "groupPrefix": { + "type": "string" + }, + "logoutURL": { + "type": "string" + }, + "secretName": { + "type": "string" + } + } + }, + "dex": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "providerURL": { + "type": "string" + }, + "redirectURL": { + "type": "string" + } + } + }, + "openshift": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "serviceAccount": { + "type": "string" + }, + "clientSecret": { + "type": "string" + }, + "dashboardURL": { + 
"type": "string" + }, + "openshiftURL": { + "type": "string" + }, + "insecureCA": { + "type": "boolean" + }, + "useServiceAccountCA": { + "type": "boolean" + }, + "secretName": { + "type": "string" + }, + "usernameClaim": { + "type": "string" + }, + "usernamePrefix": { + "type": "string" + }, + "groupnameClaim": { + "type": "string" + }, + "groupnamePrefix": { + "type": "string" + } + } + }, + "ldap": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "restartPod": { + "type": "boolean" + }, + "dashboardURL": { + "type": "string" + }, + "host": { + "type": "string" + }, + "insecureNoSSL": { + "type": "boolean" + }, + "insecureSkipVerifySSL": { + "type": "boolean" + }, + "startTLS": { + "type": "boolean" + }, + "bindDN": { + "type": "string" + }, + "bindPW": { + "type": "string" + }, + "bindPWSecretName": { + "type": "string" + }, + "userSearch": { + "type": "object", + "properties": { + "baseDN": { + "type": "string" + }, + "filter": { + "type": "string" + }, + "username": { + "type": "string" + }, + "idAttr": { + "type": "string" + }, + "emailAttr": { + "type": "string" + }, + "nameAttr": { + "type": "string" + }, + "preferredUsernameAttr": { + "type": "string" + } + } + }, + "groupSearch": { + "type": "object", + "properties": { + "baseDN": { + "type": "string" + }, + "filter": { + "type": "string" + }, + "userMatchers": { + "type": "array", + "items": { + "type": "string" + } + }, + "nameAttr": { + "type": "string" + } + } + }, + "secretName": { + "type": "string" + }, + "usernameClaim": { + "type": "string" + }, + "usernamePrefix": { + "type": "string" + }, + "groupnameClaim": { + "type": "string" + }, + "groupnamePrefix": { + "type": "string" + } + } + }, + "k10AdminUsers": { + "type": "array", + "items": { + "type": "string" + } + }, + "k10AdminGroups": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "cluster": { + "type": "object", + "properties": { + "domainName": { + "type": "string" + } + } + } +} 
diff --git a/charts/k10/k10/4.5.1400/values.yaml b/charts/k10/k10/4.5.1400/values.yaml new file mode 100644 index 000000000..ea4e28c7d --- /dev/null +++ b/charts/k10/k10/4.5.1400/values.yaml @@ -0,0 +1,456 @@ +# Default values for k10. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. +image: + registry: gcr.io + repository: kasten-images + image: '' + tag: '' + pullPolicy: Always + +rbac: + create: true +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is derived using the release and chart names. + name: "" + +scc: + create: false + +networkPolicy: + create: true + +# Empty value of airgapped.repository specifies that the installation is +# going to be online and if we provide this value using --set flag that +# means that the installation is going to be offline +global: + airgapped: + repository: '' + persistence: + mountPath: "/mnt/k10state" + enabled: true + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + accessMode: ReadWriteOnce + size: 20Gi + metering: + size: 2Gi + catalog: + size: "" + jobs: + size: "" + logging: + size: "" + grafana: + # Default value is set to 5Gi. This is the same as the default value + # from previous releases <= 4.5.1 where the Grafana sub chart used to + # reference grafana.persistence.size instead of the global values. 
+ # Since the size remains the same across upgrades, the Grafana PVC + # is not deleted and recreated which means no Grafana data is lost + # while upgrading from <= 4.5.1 + size: 5Gi + ## Do we want to use certified version to upstream container images + ## TODO: @viveksinghggits, we don't need this anymore + upstreamCertifiedImages: false + ## Set it to true while geenerating helm operator + rhMarketPlace: false + ## these values should not be provided us, these are to be used by + ## red hat marketplace + images: + admin: '' + aggregatedapis: '' + auth: '' + catalog: '' + config: '' + crypto: '' + dashboardbff: '' + events: '' + executor: '' + frontend: '' + jobs: '' + kanister: '' + logging: '' + metering: '' + state: '' + emissary: '' + prometheus: '' + configmap-reload: '' + dex: '' + kanister-tools: '' + upgrade: '' + cephtool: '' + datamover: '' + bloblifecyclemanager: '' + vbrintegrationapi: '' + grafana: '' + imagePullSecret: '' + ingress: + create: false + urlPath: "" #url path for k10 gateway + route: + enabled: false + path: "" + + +## OpenShift route configuration. 
+route: + enabled: false + # Host name for the route + host: "" + # Default path for the route + path: "" + + annotations: {} + # kubernetes.io/tls-acme: "true" + # haproxy.router.openshift.io/disable_cookies: "true" + # haproxy.router.openshift.io/balance: roundrobin + + labels: {} + # key: value + + # TLS configuration + tls: + enabled: false + # What to do in case of an insecure traffic edge termination + insecureEdgeTerminationPolicy: "Redirect" + # Where this TLS configuration should terminate + termination: "edge" + +toolsImage: + enabled: true + pullPolicy: Always + +ambassadorImage: + registry: docker.io + repository: emissaryingress + image: emissary + +dexImage: + registry: quay.io + repository: dexidp + image: dex + +kanisterToolsImage: + registry: ghcr.io + repository: kanisterio + image: kanister-tools + pullPolicy: Always + +ingress: + create: false + tls: + enabled: false + class: "" #Ingress controller type + host: "" #ingress object host name + urlPath: "" #url path for k10 gateway + pathType: "" + +eula: + accept: false #true value if EULA accepted + +license: "" #base64 encoded string provided by Kasten + +cluster: + domainName: "cluster.local" #default value is cluster.local + +prometheus: + k10image: + # take this value from image.repository + registry: gcr.io + repository: kasten-images + # Disabling init container + # which uses root cmds + initChownData: + enabled: false + rbac: + create: false + alertmanager: + enabled: false + kubeStateMetrics: + enabled: false + networkPolicy: + enabled: true + nodeExporter: + enabled: false + pushgateway: + enabled: false + scrapeCAdvisor: false + server: + # UID and groupid are from prometheus helm chart + enabled: true + securityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + retention: 30d + strategy: + rollingUpdate: + maxSurge: 100% + maxUnavailable: 100% + type: RollingUpdate + persistentVolume: + enabled: true + storageClass: "" + configMapOverrideName: 
k10-prometheus-config + fullnameOverride: prometheus-server + baseURL: /k10/prometheus/ + prefixURL: /k10/prometheus + serviceAccounts: + alertmanager: + create: false + kubeStateMetrics: + create: false + nodeExporter: + create: false + pushgateway: + create: false + server: + create: true + +jaeger: + enabled: false + agentDNS: "" + +service: + externalPort: 8000 + internalPort: 8000 + aggregatedApiPort: 10250 + gatewayAdminPort: 8877 + +secrets: + awsAccessKeyId: '' + awsSecretAccessKey: '' + awsIamRole: '' + googleApiKey: '' + dockerConfig: '' + dockerConfigPath: '' + azureTenantId: '' + azureClientId: '' + azureClientSecret: '' + azureResourceGroup: '' + azureSubscriptionID: '' + azureResourceMgrEndpoint: '' + azureADEndpoint: '' + azureADResourceID: '' + azureCloudEnvID: '' + apiTlsCrt: '' + apiTlsKey: '' + ibmSoftLayerApiKey: '' + ibmSoftLayerApiUsername: '' + vsphereEndpoint: '' + vsphereUsername: '' + vspherePassword: '' + +metering: + reportingKey: "" #[base64-encoded key] + consumerId: "" #project: + awsRegion: '' + awsMarketPlaceIamRole: '' + awsMarketplace: false # AWS cloud metering license mode + awsManagedLicense: false # AWS managed license mode + licenseConfigSecretName: '' # AWS managed license config secret for non-eks clusters + serviceAccount: + create: false + name: "" + mode: '' # controls metric and license reporting (set to `airgap` for private-network installs) + redhatMarketplacePayg: false # Redhat cloud metering license mode + reportCollectionPeriod: 1800 # metric report collection period in seconds + reportPushPeriod: 3600 # metric report push period in seconds + promoID: '' # sets the K10 promotion ID + +clusterName: '' +executorReplicas: 3 +logLevel: info + +externalGateway: + create: false + # Any standard service annotations + annotations: {} + # Host and domain name for the K10 API server + fqdn: + name: "" + #Supported types route53-mapper, external-dns + type: "" + # ARN for the AWS ACM SSL certificate used in the K10 API 
server (load balancer) + awsSSLCertARN: '' + +auth: + groupAllowList: [] +# - "group1" +# - "group2" + basicAuth: + enabled: false + secretName: "" #htpasswd based existing secret + htpasswd: "" #htpasswd string, which will be used for basic auth + tokenAuth: + enabled: false + oidcAuth: + enabled: false + providerURL: "" #URL to your OIDC provider + redirectURL: "" #URL to the K10 gateway service + scopes: "" #Space separated OIDC scopes required for userinfo. Example: "profile email" + prompt: "" #The prompt type to be requested with the OIDC provider. Default is select_account. + clientID: "" #ClientID given by the OIDC provider for K10 + clientSecret: "" #ClientSecret given by the OIDC provider for K10 + usernameClaim: "" #Claim to be used as the username + usernamePrefix: "" #Prefix that has to be used with the username obtained from the username claim + groupClaim: "" #Name of a custom OpenID Connect claim for specifying user groups + groupPrefix: "" #All groups will be prefixed with this value to prevent conflicts. + logoutURL: "" #URL to your OIDC provider's logout endpoint + #OIDC config based existing secret. + #Must include providerURL, redirectURL, scopes, clientID/secret and logoutURL. 
+ secretName: "" + dex: + enabled: false + providerURL: "" + redirectURL: "" + openshift: + enabled: false + serviceAccount: "" #service account used as the OAuth client + clientSecret: "" #The token from the service account + dashboardURL: "" #The URL for accessing K10's dashboard + openshiftURL: "" #The URL of the Openshift API server + insecureCA: false + useServiceAccountCA: false + secretName: "" # The Kubernetes Secret that contains OIDC settings + usernameClaim: "email" + usernamePrefix: "" + groupnameClaim: "groups" + groupnamePrefix: "" + ldap: + enabled: false + restartPod: false # Enable this value to force a restart of the authentication service pod + dashboardURL: "" #The URL for accessing K10's dashboard + host: "" + insecureNoSSL: false + insecureSkipVerifySSL: false + startTLS: false + bindDN: "" + bindPW: "" + bindPWSecretName: "" + userSearch: + baseDN: "" + filter: "" + username: "" + idAttr: "" + emailAttr: "" + nameAttr: "" + preferredUsernameAttr: "" + groupSearch: + baseDN: "" + filter: "" + userMatchers: [] +# - userAttr: +# groupAttr: + nameAttr: "" + secretName: "" # The Kubernetes Secret that contains OIDC settings + usernameClaim: "email" + usernamePrefix: "" + groupnameClaim: "groups" + groupnamePrefix: "" + k10AdminUsers: [] + k10AdminGroups: [] + +optionalColocatedServices: + vbrintegrationapi: + enabled: false + +cacertconfigmap: + name: "" #Name of the configmap + +apiservices: + deployed: true # If false APIService objects will not be deployed + +injectKanisterSidecar: + enabled: false + namespaceSelector: + matchLabels: {} + # Set objectSelector to filter workloads + objectSelector: + matchLabels: {} + webhookServer: + port: 8080 # should not conflict with config server port (8000) + +kanisterPodCustomLabels : "" + +kanisterPodCustomAnnotations : "" + +genericVolumeSnapshot: + resources: + requests: + memory: "" + cpu: "" + limits: + memory: "" + cpu: "" + +resources: {} + +services: + executor: + hostNetwork: false + 
dashboardbff: + hostNetwork: false + securityContext: + runAsUser: 1000 + fsGroup: 1000 + aggregatedapis: + hostNetwork: false + +apigateway: + serviceResolver: dns + +limiter: + genericVolumeSnapshots: 10 + genericVolumeCopies: 10 + genericVolumeRestores: 10 + csiSnapshots: 10 + providerSnapshots: 10 + +gateway: + insecureDisableSSLVerify: false + exposeAdminPort: true + +kanister: + backupTimeout: 45 + restoreTimeout: 600 + deleteTimeout: 45 + hookTimeout: 20 + checkRepoTimeout: 20 + statsTimeout: 20 + efsPostRestoreTimeout: 45 + podReadyWaitTimeout: 15 + +awsConfig: + assumeRoleDuration: "" + efsBackupVaultName: "k10vault" + +grafana: + enabled: true + prometheusName: prometheus-server + prometheusPrefixURL: /k10/prometheus + rbac: + namespaced: true + pspEnabled: false + +encryption: + primaryKey: # primaryKey is used for enabling encryption of K10 primary key + awsCmkKeyId: '' # Ensures AWS CMK is used for encrypting K10 primary key + vaultTransitKeyName: '' + vaultTransitPath: '' + +vmWare: + taskTimeoutMin: "" + +vault: + secretName: "" + address: "http://vault:8200" diff --git a/index.yaml b/index.yaml index 5908ecf9e..ab9ba810c 100755 --- a/index.yaml +++ b/index.yaml @@ -1931,6 +1931,37 @@ entries: - assets/haproxy/haproxy-1.4.300.tgz version: 1.4.300 hpe-csi-driver: + - annotations: + artifacthub.io/license: Apache-2.0 + artifacthub.io/links: | + - name: Documentation + url: https://scod.hpedev.io/csi_driver + artifacthub.io/prerelease: "false" + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: HPE CSI Driver for Kubernetes + catalog.cattle.io/release-name: hpe-csi-driver + apiVersion: v1 + appVersion: 2.1.1 + created: "2022-04-22T14:30:11.475699-07:00" + description: A Helm chart for installing the HPE CSI Driver for Kubernetes + digest: 1cd907895576c00b94500dfe9de895c217040cb10982bb0a22691b6d32499c36 + home: https://hpe.com/storage/containers + icon: 
https://raw.githubusercontent.com/hpe-storage/co-deployments/master/docs/assets/hpedev.png + keywords: + - HPE + - Storage + - CSI + kubeVersion: 1.21 - 1.23 + maintainers: + - email: datamattsson@hpe.com + name: datamattsson + name: hpe-csi-driver + sources: + - https://github.com/hpe-storage/co-deployments + - https://github.com/hpe-storage/csi-driver + urls: + - assets/hpe-csi-driver/hpe-csi-driver-2.1.1.tgz + version: 2.1.1 - annotations: artifacthub.io/license: Apache-2.0 artifacthub.io/links: | @@ -2462,6 +2493,25 @@ entries: - assets/k8s-triliovault-operator/k8s-triliovault-operator-v2.0.200.tgz version: v2.0.200 k10: + - annotations: + catalog.cattle.io/certified: partner + catalog.cattle.io/display-name: K10 + catalog.cattle.io/release-name: k10 + apiVersion: v2 + appVersion: 4.5.14 + created: "2022-04-23T12:56:38.613073597Z" + description: Kasten’s K10 Data Management Platform + digest: fe0e8ddc10e04937a4690ddb5818748c373f58d5a928cf4030c873d6e2daa160 + home: https://kasten.io/ + icon: https://docs.kasten.io/_static/kasten-logo-vertical.png + kubeVersion: '>= 1.17.0-0' + maintainers: + - email: support@kasten.io + name: kastenIO + name: k10 + urls: + - assets/k10/k10-4.5.1400.tgz + version: 4.5.1400 - annotations: catalog.cattle.io/certified: partner catalog.cattle.io/display-name: K10